Linux/drivers/net/ethernet/cadence/macb.c

  1 /*
  2  * Cadence MACB/GEM Ethernet Controller driver
  3  *
  4  * Copyright (C) 2004-2006 Atmel Corporation
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License version 2 as
  8  * published by the Free Software Foundation.
  9  */
 10 
 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12 #include <linux/clk.h>
 13 #include <linux/module.h>
 14 #include <linux/moduleparam.h>
 15 #include <linux/kernel.h>
 16 #include <linux/types.h>
 17 #include <linux/circ_buf.h>
 18 #include <linux/slab.h>
 19 #include <linux/init.h>
 20 #include <linux/io.h>
 21 #include <linux/gpio.h>
 22 #include <linux/gpio/consumer.h>
 23 #include <linux/interrupt.h>
 24 #include <linux/netdevice.h>
 25 #include <linux/etherdevice.h>
 26 #include <linux/dma-mapping.h>
 27 #include <linux/platform_data/macb.h>
 28 #include <linux/platform_device.h>
 29 #include <linux/phy.h>
 30 #include <linux/of.h>
 31 #include <linux/of_device.h>
 32 #include <linux/of_gpio.h>
 33 #include <linux/of_mdio.h>
 34 #include <linux/of_net.h>
 35 
 36 #include "macb.h"
 37 
 38 #define MACB_RX_BUFFER_SIZE     128
 39 #define RX_BUFFER_MULTIPLE      64  /* bytes */
 40 #define RX_RING_SIZE            512 /* must be power of 2 */
 41 #define RX_RING_BYTES           (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 42 
 43 #define TX_RING_SIZE            128 /* must be power of 2 */
 44 #define TX_RING_BYTES           (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 45 
 46 /* level of occupied TX descriptors under which we wake up the TX process */
 47 #define MACB_TX_WAKEUP_THRESH   (3 * TX_RING_SIZE / 4)
 48 
 49 #define MACB_RX_INT_FLAGS       (MACB_BIT(RCOMP) | MACB_BIT(RXUBR)      \
 50                                  | MACB_BIT(ISR_ROVR))
 51 #define MACB_TX_ERR_FLAGS       (MACB_BIT(ISR_TUND)                     \
 52                                         | MACB_BIT(ISR_RLE)             \
 53                                         | MACB_BIT(TXERR))
 54 #define MACB_TX_INT_FLAGS       (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 55 
 56 #define MACB_MAX_TX_LEN         ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 57 #define GEM_MAX_TX_LEN          ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 58 
 59 #define GEM_MTU_MIN_SIZE        68
 60 
 61 #define MACB_WOL_HAS_MAGIC_PACKET       (0x1 << 0)
 62 #define MACB_WOL_ENABLED                (0x1 << 1)
 63 
 64 /*
 65  * Graceful stop timeouts in us. We should allow up to
 66  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 67  */
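     /* (A 1518-byte maximum frame * 8 bits) / 10 Mbit/s ~= 1214 us, so the
      * 1230 us below leaves a small margin on top of one frame time.
      */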
 68 #define MACB_HALT_TIMEOUT       1230
 69 
 70 /* Ring buffer accessors */
 71 static unsigned int macb_tx_ring_wrap(unsigned int index)
 72 {
 73         return index & (TX_RING_SIZE - 1);
 74 }
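     /* The wrap helpers (here and macb_rx_ring_wrap() below) rely on the ring
      * sizes being powers of 2, so "index & (SIZE - 1)" equals
      * "index % SIZE"; e.g. with TX_RING_SIZE = 128, index 130 wraps to slot 2.
      */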
 75 
 76 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 77                                           unsigned int index)
 78 {
 79         return &queue->tx_ring[macb_tx_ring_wrap(index)];
 80 }
 81 
 82 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
 83                                        unsigned int index)
 84 {
 85         return &queue->tx_skb[macb_tx_ring_wrap(index)];
 86 }
 87 
 88 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 89 {
 90         dma_addr_t offset;
 91 
 92         offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
 93 
 94         return queue->tx_ring_dma + offset;
 95 }
 96 
 97 static unsigned int macb_rx_ring_wrap(unsigned int index)
 98 {
 99         return index & (RX_RING_SIZE - 1);
100 }
101 
102 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
103 {
104         return &bp->rx_ring[macb_rx_ring_wrap(index)];
105 }
106 
107 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
108 {
109         return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
110 }
111 
112 /* I/O accessors */
113 static u32 hw_readl_native(struct macb *bp, int offset)
114 {
115         return __raw_readl(bp->regs + offset);
116 }
117 
118 static void hw_writel_native(struct macb *bp, int offset, u32 value)
119 {
120         __raw_writel(value, bp->regs + offset);
121 }
122 
123 static u32 hw_readl(struct macb *bp, int offset)
124 {
125         return readl_relaxed(bp->regs + offset);
126 }
127 
128 static void hw_writel(struct macb *bp, int offset, u32 value)
129 {
130         writel_relaxed(value, bp->regs + offset);
131 }
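     /* The *_native accessors use __raw_readl()/__raw_writel(), which do no
      * byte swapping, whereas the defaults use the relaxed little-endian
      * accessors; which pair a device ends up with is presumably chosen at
      * probe time with the help of hw_is_native_io() below.
      */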
132 
 133 /*
 134  * Find the CPU endianness by using the loopback bit of the NCR register. When
 135  * the CPU is in big endian mode, we need to program swapped mode for
 136  * management descriptor access.
 137  */
138 static bool hw_is_native_io(void __iomem *addr)
139 {
140         u32 value = MACB_BIT(LLB);
141 
142         __raw_writel(value, addr + MACB_NCR);
143         value = __raw_readl(addr + MACB_NCR);
144 
145         /* Write 0 back to disable everything */
146         __raw_writel(0, addr + MACB_NCR);
147 
148         return value == MACB_BIT(LLB);
149 }
150 
151 static bool hw_is_gem(void __iomem *addr, bool native_io)
152 {
153         u32 id;
154 
155         if (native_io)
156                 id = __raw_readl(addr + MACB_MID);
157         else
158                 id = readl_relaxed(addr + MACB_MID);
159 
160         return MACB_BFEXT(IDNUM, id) >= 0x2;
161 }
162 
163 static void macb_set_hwaddr(struct macb *bp)
164 {
165         u32 bottom;
166         u16 top;
167 
168         bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
169         macb_or_gem_writel(bp, SA1B, bottom);
170         top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
171         macb_or_gem_writel(bp, SA1T, top);
172 
173         /* Clear unused address register sets */
174         macb_or_gem_writel(bp, SA2B, 0);
175         macb_or_gem_writel(bp, SA2T, 0);
176         macb_or_gem_writel(bp, SA3B, 0);
177         macb_or_gem_writel(bp, SA3T, 0);
178         macb_or_gem_writel(bp, SA4B, 0);
179         macb_or_gem_writel(bp, SA4T, 0);
180 }
181 
182 static void macb_get_hwaddr(struct macb *bp)
183 {
184         struct macb_platform_data *pdata;
185         u32 bottom;
186         u16 top;
187         u8 addr[6];
188         int i;
189 
190         pdata = dev_get_platdata(&bp->pdev->dev);
191 
 192         /* Check all 4 address register sets for a valid address */
193         for (i = 0; i < 4; i++) {
194                 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
195                 top = macb_or_gem_readl(bp, SA1T + i * 8);
196 
197                 if (pdata && pdata->rev_eth_addr) {
198                         addr[5] = bottom & 0xff;
199                         addr[4] = (bottom >> 8) & 0xff;
200                         addr[3] = (bottom >> 16) & 0xff;
201                         addr[2] = (bottom >> 24) & 0xff;
202                         addr[1] = top & 0xff;
203                         addr[0] = (top & 0xff00) >> 8;
204                 } else {
205                         addr[0] = bottom & 0xff;
206                         addr[1] = (bottom >> 8) & 0xff;
207                         addr[2] = (bottom >> 16) & 0xff;
208                         addr[3] = (bottom >> 24) & 0xff;
209                         addr[4] = top & 0xff;
210                         addr[5] = (top >> 8) & 0xff;
211                 }
212 
213                 if (is_valid_ether_addr(addr)) {
214                         memcpy(bp->dev->dev_addr, addr, sizeof(addr));
215                         return;
216                 }
217         }
218 
219         dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
220         eth_hw_addr_random(bp->dev);
221 }
222 
223 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
224 {
225         struct macb *bp = bus->priv;
226         int value;
227 
228         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
229                               | MACB_BF(RW, MACB_MAN_READ)
230                               | MACB_BF(PHYA, mii_id)
231                               | MACB_BF(REGA, regnum)
232                               | MACB_BF(CODE, MACB_MAN_CODE)));
233 
234         /* wait for end of transfer */
235         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
236                 cpu_relax();
237 
238         value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
239 
240         return value;
241 }
242 
243 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
244                            u16 value)
245 {
246         struct macb *bp = bus->priv;
247 
248         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
249                               | MACB_BF(RW, MACB_MAN_WRITE)
250                               | MACB_BF(PHYA, mii_id)
251                               | MACB_BF(REGA, regnum)
252                               | MACB_BF(CODE, MACB_MAN_CODE)
253                               | MACB_BF(DATA, value)));
254 
255         /* wait for end of transfer */
256         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
257                 cpu_relax();
258 
259         return 0;
260 }
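     /* Both MDIO helpers build an IEEE 802.3 Clause 22 management frame in
      * the MAN register (start-of-frame, read/write opcode, PHY address,
      * register address, turnaround code, data) and busy-wait on the IDLE
      * bit in NSR until the transfer completes.
      */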
261 
 262 /**
 263  * macb_set_tx_clk() - Set a clock to a new frequency
 264  * @clk:        Pointer to the clock to change
 265  * @speed:      New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 266  * @dev:        Pointer to the struct net_device
 267  */
268 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
269 {
270         long ferr, rate, rate_rounded;
271 
272         if (!clk)
273                 return;
274 
275         switch (speed) {
276         case SPEED_10:
277                 rate = 2500000;
278                 break;
279         case SPEED_100:
280                 rate = 25000000;
281                 break;
282         case SPEED_1000:
283                 rate = 125000000;
284                 break;
285         default:
286                 return;
287         }
288 
289         rate_rounded = clk_round_rate(clk, rate);
290         if (rate_rounded < 0)
291                 return;
292 
293         /* RGMII allows 50 ppm frequency error. Test and warn if this limit
294          * is not satisfied.
295          */
296         ferr = abs(rate_rounded - rate);
297         ferr = DIV_ROUND_UP(ferr, rate / 100000);
298         if (ferr > 5)
299                 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
300                                 rate);
301 
302         if (clk_set_rate(clk, rate_rounded))
303                 netdev_err(dev, "adjusting tx_clk failed.\n");
304 }
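     /* On the 50 ppm check above: DIV_ROUND_UP(ferr, rate / 100000) yields
      * the error in units of 10 ppm, so "ferr > 5" warns above 50 ppm; e.g.
      * a 7 kHz offset at 125 MHz is 56 ppm and would trigger the warning.
      */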
305 
306 static void macb_handle_link_change(struct net_device *dev)
307 {
308         struct macb *bp = netdev_priv(dev);
309         struct phy_device *phydev = bp->phy_dev;
310         unsigned long flags;
311         int status_change = 0;
312 
313         spin_lock_irqsave(&bp->lock, flags);
314 
315         if (phydev->link) {
316                 if ((bp->speed != phydev->speed) ||
317                     (bp->duplex != phydev->duplex)) {
318                         u32 reg;
319 
320                         reg = macb_readl(bp, NCFGR);
321                         reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
322                         if (macb_is_gem(bp))
323                                 reg &= ~GEM_BIT(GBE);
324 
325                         if (phydev->duplex)
326                                 reg |= MACB_BIT(FD);
327                         if (phydev->speed == SPEED_100)
328                                 reg |= MACB_BIT(SPD);
329                         if (phydev->speed == SPEED_1000 &&
330                             bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
331                                 reg |= GEM_BIT(GBE);
332 
333                         macb_or_gem_writel(bp, NCFGR, reg);
334 
335                         bp->speed = phydev->speed;
336                         bp->duplex = phydev->duplex;
337                         status_change = 1;
338                 }
339         }
340 
341         if (phydev->link != bp->link) {
342                 if (!phydev->link) {
343                         bp->speed = 0;
344                         bp->duplex = -1;
345                 }
346                 bp->link = phydev->link;
347 
348                 status_change = 1;
349         }
350 
351         spin_unlock_irqrestore(&bp->lock, flags);
352 
353         if (status_change) {
354                 if (phydev->link) {
355                         /* Update the TX clock rate if and only if the link is
356                          * up and there has been a link change.
357                          */
358                         macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
359 
360                         netif_carrier_on(dev);
361                         netdev_info(dev, "link up (%d/%s)\n",
362                                     phydev->speed,
363                                     phydev->duplex == DUPLEX_FULL ?
364                                     "Full" : "Half");
365                 } else {
366                         netif_carrier_off(dev);
367                         netdev_info(dev, "link down\n");
368                 }
369         }
370 }
371 
 372 /* based on au1000_eth.c */
373 static int macb_mii_probe(struct net_device *dev)
374 {
375         struct macb *bp = netdev_priv(dev);
376         struct macb_platform_data *pdata;
377         struct phy_device *phydev;
378         int phy_irq;
379         int ret;
380 
381         phydev = phy_find_first(bp->mii_bus);
382         if (!phydev) {
383                 netdev_err(dev, "no PHY found\n");
384                 return -ENXIO;
385         }
386 
387         pdata = dev_get_platdata(&bp->pdev->dev);
388         if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
389                 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
390                 if (!ret) {
391                         phy_irq = gpio_to_irq(pdata->phy_irq_pin);
392                         phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
393                 }
394         }
395 
396         /* attach the mac to the phy */
397         ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
398                                  bp->phy_interface);
399         if (ret) {
400                 netdev_err(dev, "Could not attach to PHY\n");
401                 return ret;
402         }
403 
404         /* mask with MAC supported features */
405         if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
406                 phydev->supported &= PHY_GBIT_FEATURES;
407         else
408                 phydev->supported &= PHY_BASIC_FEATURES;
409 
410         if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
411                 phydev->supported &= ~SUPPORTED_1000baseT_Half;
412 
413         phydev->advertising = phydev->supported;
414 
415         bp->link = 0;
416         bp->speed = 0;
417         bp->duplex = -1;
418         bp->phy_dev = phydev;
419 
420         return 0;
421 }
422 
423 static int macb_mii_init(struct macb *bp)
424 {
425         struct macb_platform_data *pdata;
426         struct device_node *np;
427         int err = -ENXIO, i;
428 
429         /* Enable management port */
430         macb_writel(bp, NCR, MACB_BIT(MPE));
431 
432         bp->mii_bus = mdiobus_alloc();
433         if (bp->mii_bus == NULL) {
434                 err = -ENOMEM;
435                 goto err_out;
436         }
437 
438         bp->mii_bus->name = "MACB_mii_bus";
439         bp->mii_bus->read = &macb_mdio_read;
440         bp->mii_bus->write = &macb_mdio_write;
441         snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
442                 bp->pdev->name, bp->pdev->id);
443         bp->mii_bus->priv = bp;
444         bp->mii_bus->parent = &bp->pdev->dev;
445         pdata = dev_get_platdata(&bp->pdev->dev);
446 
447         dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
448 
449         np = bp->pdev->dev.of_node;
450         if (np) {
451                 /* try dt phy registration */
452                 err = of_mdiobus_register(bp->mii_bus, np);
453 
 454                 /* fall back to standard phy registration if no phy was
 455                  * found during dt phy registration */
456                 if (!err && !phy_find_first(bp->mii_bus)) {
457                         for (i = 0; i < PHY_MAX_ADDR; i++) {
458                                 struct phy_device *phydev;
459 
460                                 phydev = mdiobus_scan(bp->mii_bus, i);
461                                 if (IS_ERR(phydev) &&
462                                     PTR_ERR(phydev) != -ENODEV) {
463                                         err = PTR_ERR(phydev);
464                                         break;
465                                 }
466                         }
467 
468                         if (err)
469                                 goto err_out_unregister_bus;
470                 }
471         } else {
472                 if (pdata)
473                         bp->mii_bus->phy_mask = pdata->phy_mask;
474 
475                 err = mdiobus_register(bp->mii_bus);
476         }
477 
478         if (err)
479                 goto err_out_free_mdiobus;
480 
481         err = macb_mii_probe(bp->dev);
482         if (err)
483                 goto err_out_unregister_bus;
484 
485         return 0;
486 
487 err_out_unregister_bus:
488         mdiobus_unregister(bp->mii_bus);
489 err_out_free_mdiobus:
490         mdiobus_free(bp->mii_bus);
491 err_out:
492         return err;
493 }
494 
495 static void macb_update_stats(struct macb *bp)
496 {
497         u32 *p = &bp->hw_stats.macb.rx_pause_frames;
498         u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
499         int offset = MACB_PFR;
500 
501         WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
502 
 503         for (; p < end; p++, offset += 4)
504                 *p += bp->macb_reg_readl(bp, offset);
505 }
506 
507 static int macb_halt_tx(struct macb *bp)
508 {
509         unsigned long   halt_time, timeout;
510         u32             status;
511 
512         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
513 
514         timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
515         do {
516                 halt_time = jiffies;
517                 status = macb_readl(bp, TSR);
518                 if (!(status & MACB_BIT(TGO)))
519                         return 0;
520 
521                 usleep_range(10, 250);
522         } while (time_before(halt_time, timeout));
523 
524         return -ETIMEDOUT;
525 }
526 
527 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
528 {
529         if (tx_skb->mapping) {
530                 if (tx_skb->mapped_as_page)
531                         dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
532                                        tx_skb->size, DMA_TO_DEVICE);
533                 else
534                         dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
535                                          tx_skb->size, DMA_TO_DEVICE);
536                 tx_skb->mapping = 0;
537         }
538 
539         if (tx_skb->skb) {
540                 dev_kfree_skb_any(tx_skb->skb);
541                 tx_skb->skb = NULL;
542         }
543 }
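     /* mapped_as_page records whether the buffer was mapped with
      * skb_frag_dma_map() or dma_map_single() in macb_tx_map() below, so
      * the matching dma_unmap_page()/dma_unmap_single() call is made here.
      */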
544 
545 static void macb_tx_error_task(struct work_struct *work)
546 {
547         struct macb_queue       *queue = container_of(work, struct macb_queue,
548                                                       tx_error_task);
549         struct macb             *bp = queue->bp;
550         struct macb_tx_skb      *tx_skb;
551         struct macb_dma_desc    *desc;
552         struct sk_buff          *skb;
553         unsigned int            tail;
554         unsigned long           flags;
555 
556         netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
557                     (unsigned int)(queue - bp->queues),
558                     queue->tx_tail, queue->tx_head);
559 
560         /* Prevent the queue IRQ handlers from running: each of them may call
561          * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
562          * As explained below, we have to halt the transmission before updating
563          * TBQP registers so we call netif_tx_stop_all_queues() to notify the
564          * network engine about the macb/gem being halted.
565          */
566         spin_lock_irqsave(&bp->lock, flags);
567 
568         /* Make sure nobody is trying to queue up new packets */
569         netif_tx_stop_all_queues(bp->dev);
570 
571         /*
572          * Stop transmission now
573          * (in case we have just queued new packets)
574          * macb/gem must be halted to write TBQP register
575          */
576         if (macb_halt_tx(bp))
 577                 /* Just complain for now; reinitializing the TX path might help */
578                 netdev_err(bp->dev, "BUG: halt tx timed out\n");
579 
580         /*
581          * Treat frames in TX queue including the ones that caused the error.
582          * Free transmit buffers in upper layer.
583          */
584         for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
585                 u32     ctrl;
586 
587                 desc = macb_tx_desc(queue, tail);
588                 ctrl = desc->ctrl;
589                 tx_skb = macb_tx_skb(queue, tail);
590                 skb = tx_skb->skb;
591 
592                 if (ctrl & MACB_BIT(TX_USED)) {
593                         /* skb is set for the last buffer of the frame */
594                         while (!skb) {
595                                 macb_tx_unmap(bp, tx_skb);
596                                 tail++;
597                                 tx_skb = macb_tx_skb(queue, tail);
598                                 skb = tx_skb->skb;
599                         }
600 
601                         /* ctrl still refers to the first buffer descriptor
602                          * since it's the only one written back by the hardware
603                          */
604                         if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
605                                 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
606                                             macb_tx_ring_wrap(tail), skb->data);
607                                 bp->stats.tx_packets++;
608                                 bp->stats.tx_bytes += skb->len;
609                         }
610                 } else {
611                         /*
612                          * "Buffers exhausted mid-frame" errors may only happen
613                          * if the driver is buggy, so complain loudly about those.
614                          * Statistics are updated by hardware.
615                          */
616                         if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
617                                 netdev_err(bp->dev,
618                                            "BUG: TX buffers exhausted mid-frame\n");
619 
620                         desc->ctrl = ctrl | MACB_BIT(TX_USED);
621                 }
622 
623                 macb_tx_unmap(bp, tx_skb);
624         }
625 
626         /* Set end of TX queue */
627         desc = macb_tx_desc(queue, 0);
628         desc->addr = 0;
629         desc->ctrl = MACB_BIT(TX_USED);
630 
631         /* Make descriptor updates visible to hardware */
632         wmb();
633 
634         /* Reinitialize the TX desc queue */
635         queue_writel(queue, TBQP, queue->tx_ring_dma);
636         /* Make TX ring reflect state of hardware */
637         queue->tx_head = 0;
638         queue->tx_tail = 0;
639 
640         /* Housework before enabling TX IRQ */
641         macb_writel(bp, TSR, macb_readl(bp, TSR));
642         queue_writel(queue, IER, MACB_TX_INT_FLAGS);
643 
644         /* Now we are ready to start transmission again */
645         netif_tx_start_all_queues(bp->dev);
646         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
647 
648         spin_unlock_irqrestore(&bp->lock, flags);
649 }
650 
651 static void macb_tx_interrupt(struct macb_queue *queue)
652 {
653         unsigned int tail;
654         unsigned int head;
655         u32 status;
656         struct macb *bp = queue->bp;
657         u16 queue_index = queue - bp->queues;
658 
659         status = macb_readl(bp, TSR);
660         macb_writel(bp, TSR, status);
661 
662         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
663                 queue_writel(queue, ISR, MACB_BIT(TCOMP));
664 
665         netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
666                 (unsigned long)status);
667 
668         head = queue->tx_head;
669         for (tail = queue->tx_tail; tail != head; tail++) {
670                 struct macb_tx_skb      *tx_skb;
671                 struct sk_buff          *skb;
672                 struct macb_dma_desc    *desc;
673                 u32                     ctrl;
674 
675                 desc = macb_tx_desc(queue, tail);
676 
677                 /* Make hw descriptor updates visible to CPU */
678                 rmb();
679 
680                 ctrl = desc->ctrl;
681 
682                 /* TX_USED bit is only set by hardware on the very first buffer
683                  * descriptor of the transmitted frame.
684                  */
685                 if (!(ctrl & MACB_BIT(TX_USED)))
686                         break;
687 
688                 /* Process all buffers of the current transmitted frame */
689                 for (;; tail++) {
690                         tx_skb = macb_tx_skb(queue, tail);
691                         skb = tx_skb->skb;
692 
693                         /* First, update TX stats if needed */
694                         if (skb) {
695                                 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
696                                             macb_tx_ring_wrap(tail), skb->data);
697                                 bp->stats.tx_packets++;
698                                 bp->stats.tx_bytes += skb->len;
699                         }
700 
701                         /* Now we can safely release resources */
702                         macb_tx_unmap(bp, tx_skb);
703 
704                         /* skb is set only for the last buffer of the frame.
705                          * WARNING: at this point skb has been freed by
706                          * macb_tx_unmap().
707                          */
708                         if (skb)
709                                 break;
710                 }
711         }
712 
713         queue->tx_tail = tail;
714         if (__netif_subqueue_stopped(bp->dev, queue_index) &&
715             CIRC_CNT(queue->tx_head, queue->tx_tail,
716                      TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
717                 netif_wake_subqueue(bp->dev, queue_index);
718 }
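     /* With TX_RING_SIZE = 128, MACB_TX_WAKEUP_THRESH is 96: the subqueue is
      * only woken once at most 96 descriptors are still in use, i.e. at
      * least a quarter of the ring is free again, giving some hysteresis
      * against rapid stop/wake cycles.
      */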
719 
720 static void gem_rx_refill(struct macb *bp)
721 {
722         unsigned int            entry;
723         struct sk_buff          *skb;
724         dma_addr_t              paddr;
725 
726         while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
727                 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
728 
729                 /* Make hw descriptor updates visible to CPU */
730                 rmb();
731 
732                 bp->rx_prepared_head++;
733 
734                 if (bp->rx_skbuff[entry] == NULL) {
735                         /* allocate sk_buff for this free entry in ring */
736                         skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
737                         if (unlikely(skb == NULL)) {
738                                 netdev_err(bp->dev,
739                                            "Unable to allocate sk_buff\n");
740                                 break;
741                         }
742 
743                         /* now fill corresponding descriptor entry */
744                         paddr = dma_map_single(&bp->pdev->dev, skb->data,
745                                                bp->rx_buffer_size, DMA_FROM_DEVICE);
746                         if (dma_mapping_error(&bp->pdev->dev, paddr)) {
747                                 dev_kfree_skb(skb);
748                                 break;
749                         }
750 
751                         bp->rx_skbuff[entry] = skb;
752 
753                         if (entry == RX_RING_SIZE - 1)
754                                 paddr |= MACB_BIT(RX_WRAP);
755                         bp->rx_ring[entry].addr = paddr;
756                         bp->rx_ring[entry].ctrl = 0;
757 
758                         /* properly align Ethernet header */
759                         skb_reserve(skb, NET_IP_ALIGN);
760                 } else {
761                         bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
762                         bp->rx_ring[entry].ctrl = 0;
763                 }
764         }
765 
766         /* Make descriptor updates visible to hardware */
767         wmb();
768 
769         netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
770                    bp->rx_prepared_head, bp->rx_tail);
771 }
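     /* Descriptors are handed back to hardware either by writing a fresh
      * buffer address (RX_USED clear) or by clearing RX_USED on a recycled
      * entry; RX_WRAP on the last slot makes the ring circular.
      */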
772 
773 /* Mark DMA descriptors from begin up to and not including end as unused */
774 static void discard_partial_frame(struct macb *bp, unsigned int begin,
775                                   unsigned int end)
776 {
777         unsigned int frag;
778 
779         for (frag = begin; frag != end; frag++) {
780                 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
781                 desc->addr &= ~MACB_BIT(RX_USED);
782         }
783 
784         /* Make descriptor updates visible to hardware */
785         wmb();
786 
 787         /*
 788          * When this happens, the hardware stats registers for
 789          * whatever caused this are updated, so we don't have to record
 790          * anything.
 791          */
792 }
793 
794 static int gem_rx(struct macb *bp, int budget)
795 {
796         unsigned int            len;
797         unsigned int            entry;
798         struct sk_buff          *skb;
799         struct macb_dma_desc    *desc;
800         int                     count = 0;
801 
802         while (count < budget) {
803                 u32 addr, ctrl;
804 
805                 entry = macb_rx_ring_wrap(bp->rx_tail);
806                 desc = &bp->rx_ring[entry];
807 
808                 /* Make hw descriptor updates visible to CPU */
809                 rmb();
810 
811                 addr = desc->addr;
812                 ctrl = desc->ctrl;
813 
814                 if (!(addr & MACB_BIT(RX_USED)))
815                         break;
816 
817                 bp->rx_tail++;
818                 count++;
819 
820                 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
821                         netdev_err(bp->dev,
822                                    "not whole frame pointed by descriptor\n");
823                         bp->stats.rx_dropped++;
824                         break;
825                 }
826                 skb = bp->rx_skbuff[entry];
827                 if (unlikely(!skb)) {
828                         netdev_err(bp->dev,
829                                    "inconsistent Rx descriptor chain\n");
830                         bp->stats.rx_dropped++;
831                         break;
832                 }
 833                 /* now everything is ready for receiving the packet */
834                 bp->rx_skbuff[entry] = NULL;
835                 len = ctrl & bp->rx_frm_len_mask;
836 
837                 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
838 
839                 skb_put(skb, len);
840                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
841                 dma_unmap_single(&bp->pdev->dev, addr,
842                                  bp->rx_buffer_size, DMA_FROM_DEVICE);
843 
844                 skb->protocol = eth_type_trans(skb, bp->dev);
845                 skb_checksum_none_assert(skb);
846                 if (bp->dev->features & NETIF_F_RXCSUM &&
847                     !(bp->dev->flags & IFF_PROMISC) &&
848                     GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
849                         skb->ip_summed = CHECKSUM_UNNECESSARY;
850 
851                 bp->stats.rx_packets++;
852                 bp->stats.rx_bytes += skb->len;
853 
854 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
855                 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
856                             skb->len, skb->csum);
857                 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
858                                skb_mac_header(skb), 16, true);
859                 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
860                                skb->data, 32, true);
861 #endif
862 
863                 netif_receive_skb(skb);
864         }
865 
866         gem_rx_refill(bp);
867 
868         return count;
869 }
870 
871 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
872                          unsigned int last_frag)
873 {
874         unsigned int len;
875         unsigned int frag;
876         unsigned int offset;
877         struct sk_buff *skb;
878         struct macb_dma_desc *desc;
879 
880         desc = macb_rx_desc(bp, last_frag);
881         len = desc->ctrl & bp->rx_frm_len_mask;
882 
883         netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
884                 macb_rx_ring_wrap(first_frag),
885                 macb_rx_ring_wrap(last_frag), len);
886 
887         /*
888          * The ethernet header starts NET_IP_ALIGN bytes into the
889          * first buffer. Since the header is 14 bytes, this makes the
890          * payload word-aligned.
891          *
892          * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
893          * the two padding bytes into the skb so that we avoid hitting
894          * the slowpath in memcpy(), and pull them off afterwards.
895          */
896         skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
897         if (!skb) {
898                 bp->stats.rx_dropped++;
899                 for (frag = first_frag; ; frag++) {
900                         desc = macb_rx_desc(bp, frag);
901                         desc->addr &= ~MACB_BIT(RX_USED);
902                         if (frag == last_frag)
903                                 break;
904                 }
905 
906                 /* Make descriptor updates visible to hardware */
907                 wmb();
908 
909                 return 1;
910         }
911 
912         offset = 0;
913         len += NET_IP_ALIGN;
914         skb_checksum_none_assert(skb);
915         skb_put(skb, len);
916 
917         for (frag = first_frag; ; frag++) {
918                 unsigned int frag_len = bp->rx_buffer_size;
919 
920                 if (offset + frag_len > len) {
921                         if (unlikely(frag != last_frag)) {
922                                 dev_kfree_skb_any(skb);
923                                 return -1;
924                         }
925                         frag_len = len - offset;
926                 }
927                 skb_copy_to_linear_data_offset(skb, offset,
928                                 macb_rx_buffer(bp, frag), frag_len);
929                 offset += bp->rx_buffer_size;
930                 desc = macb_rx_desc(bp, frag);
931                 desc->addr &= ~MACB_BIT(RX_USED);
932 
933                 if (frag == last_frag)
934                         break;
935         }
936 
937         /* Make descriptor updates visible to hardware */
938         wmb();
939 
940         __skb_pull(skb, NET_IP_ALIGN);
941         skb->protocol = eth_type_trans(skb, bp->dev);
942 
943         bp->stats.rx_packets++;
944         bp->stats.rx_bytes += skb->len;
945         netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
946                    skb->len, skb->csum);
947         netif_receive_skb(skb);
948 
949         return 0;
950 }
951 
952 static inline void macb_init_rx_ring(struct macb *bp)
953 {
954         dma_addr_t addr;
955         int i;
956 
957         addr = bp->rx_buffers_dma;
958         for (i = 0; i < RX_RING_SIZE; i++) {
959                 bp->rx_ring[i].addr = addr;
960                 bp->rx_ring[i].ctrl = 0;
961                 addr += bp->rx_buffer_size;
962         }
963         bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
964 }
965 
966 static int macb_rx(struct macb *bp, int budget)
967 {
968         bool reset_rx_queue = false;
969         int received = 0;
970         unsigned int tail;
971         int first_frag = -1;
972 
973         for (tail = bp->rx_tail; budget > 0; tail++) {
974                 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
975                 u32 addr, ctrl;
976 
977                 /* Make hw descriptor updates visible to CPU */
978                 rmb();
979 
980                 addr = desc->addr;
981                 ctrl = desc->ctrl;
982 
983                 if (!(addr & MACB_BIT(RX_USED)))
984                         break;
985 
986                 if (ctrl & MACB_BIT(RX_SOF)) {
987                         if (first_frag != -1)
988                                 discard_partial_frame(bp, first_frag, tail);
989                         first_frag = tail;
990                 }
991 
992                 if (ctrl & MACB_BIT(RX_EOF)) {
993                         int dropped;
994 
995                         if (unlikely(first_frag == -1)) {
996                                 reset_rx_queue = true;
997                                 continue;
998                         }
999 
1000                         dropped = macb_rx_frame(bp, first_frag, tail);
1001                         first_frag = -1;
1002                         if (unlikely(dropped < 0)) {
1003                                 reset_rx_queue = true;
1004                                 continue;
1005                         }
1006                         if (!dropped) {
1007                                 received++;
1008                                 budget--;
1009                         }
1010                 }
1011         }
1012 
1013         if (unlikely(reset_rx_queue)) {
1014                 unsigned long flags;
1015                 u32 ctrl;
1016 
1017                 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1018 
1019                 spin_lock_irqsave(&bp->lock, flags);
1020 
1021                 ctrl = macb_readl(bp, NCR);
1022                 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1023 
1024                 macb_init_rx_ring(bp);
1025                 macb_writel(bp, RBQP, bp->rx_ring_dma);
1026 
1027                 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1028 
1029                 spin_unlock_irqrestore(&bp->lock, flags);
1030                 return received;
1031         }
1032 
1033         if (first_frag != -1)
1034                 bp->rx_tail = first_frag;
1035         else
1036                 bp->rx_tail = tail;
1037 
1038         return received;
1039 }
1040 
1041 static int macb_poll(struct napi_struct *napi, int budget)
1042 {
1043         struct macb *bp = container_of(napi, struct macb, napi);
1044         int work_done;
1045         u32 status;
1046 
1047         status = macb_readl(bp, RSR);
1048         macb_writel(bp, RSR, status);
1049 
1050         work_done = 0;
1051 
1052         netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1053                    (unsigned long)status, budget);
1054 
1055         work_done = bp->macbgem_ops.mog_rx(bp, budget);
1056         if (work_done < budget) {
1057                 napi_complete(napi);
1058 
1059                 /* Packets received while interrupts were disabled */
1060                 status = macb_readl(bp, RSR);
1061                 if (status) {
1062                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1063                                 macb_writel(bp, ISR, MACB_BIT(RCOMP));
1064                         napi_reschedule(napi);
1065                 } else {
1066                         macb_writel(bp, IER, MACB_RX_INT_FLAGS);
1067                 }
1068         }
1069 
1070         /* TODO: Handle errors */
1071 
1072         return work_done;
1073 }
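     /* Re-reading RSR after napi_complete() closes the race where a packet
      * arrives between the final poll and re-enabling RX interrupts: if
      * anything is pending, polling is rescheduled instead of unmasking
      * MACB_RX_INT_FLAGS.
      */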
1074 
1075 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1076 {
1077         struct macb_queue *queue = dev_id;
1078         struct macb *bp = queue->bp;
1079         struct net_device *dev = bp->dev;
1080         u32 status, ctrl;
1081 
1082         status = queue_readl(queue, ISR);
1083 
1084         if (unlikely(!status))
1085                 return IRQ_NONE;
1086 
1087         spin_lock(&bp->lock);
1088 
1089         while (status) {
1090                 /* close possible race with dev_close */
1091                 if (unlikely(!netif_running(dev))) {
1092                         queue_writel(queue, IDR, -1);
1093                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1094                                 queue_writel(queue, ISR, -1);
1095                         break;
1096                 }
1097 
1098                 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1099                             (unsigned int)(queue - bp->queues),
1100                             (unsigned long)status);
1101 
1102                 if (status & MACB_RX_INT_FLAGS) {
1103                         /*
1104                          * There's no point taking any more interrupts
1105                          * until we have processed the buffers. The
1106                          * scheduling call may fail if the poll routine
1107                          * is already scheduled, so disable interrupts
1108                          * now.
1109                          */
1110                         queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1111                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1112                                 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1113 
1114                         if (napi_schedule_prep(&bp->napi)) {
1115                                 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1116                                 __napi_schedule(&bp->napi);
1117                         }
1118                 }
1119 
1120                 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1121                         queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1122                         schedule_work(&queue->tx_error_task);
1123 
1124                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1125                                 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1126 
1127                         break;
1128                 }
1129 
1130                 if (status & MACB_BIT(TCOMP))
1131                         macb_tx_interrupt(queue);
1132 
1133                 /*
1134                  * Link change detection isn't possible with RMII, so we'll
1135                  * add that if/when we get our hands on a full-blown MII PHY.
1136                  */
1137 
1138                 /* There is a hardware issue under heavy load where DMA can
1139                  * stop; this causes endless "used buffer descriptor read"
1140                  * interrupts but it can be cleared by re-enabling RX. See
1141                  * the at91 manual, section 41.3.1 or the Zynq manual
1142                  * section 16.7.4 for details.
1143                  */
1144                 if (status & MACB_BIT(RXUBR)) {
1145                         ctrl = macb_readl(bp, NCR);
1146                         macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1147                         macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1148 
1149                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1150                                 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1151                 }
1152 
1153                 if (status & MACB_BIT(ISR_ROVR)) {
1154                         /* We missed at least one packet */
1155                         if (macb_is_gem(bp))
1156                                 bp->hw_stats.gem.rx_overruns++;
1157                         else
1158                                 bp->hw_stats.macb.rx_overruns++;
1159 
1160                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1161                                 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1162                 }
1163 
1164                 if (status & MACB_BIT(HRESP)) {
1165                         /*
1166                          * TODO: Reset the hardware, and maybe move the
1167                          * netdev_err to a lower-priority context as well
1168                          * (work queue?)
1169                          */
1170                         netdev_err(dev, "DMA bus error: HRESP not OK\n");
1171 
1172                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1173                                 queue_writel(queue, ISR, MACB_BIT(HRESP));
1174                 }
1175 
1176                 status = queue_readl(queue, ISR);
1177         }
1178 
1179         spin_unlock(&bp->lock);
1180 
1181         return IRQ_HANDLED;
1182 }
1183 
1184 #ifdef CONFIG_NET_POLL_CONTROLLER
1185 /*
1186  * Polling receive - used by netconsole and other diagnostic tools
1187  * to allow network i/o with interrupts disabled.
1188  */
1189 static void macb_poll_controller(struct net_device *dev)
1190 {
1191         struct macb *bp = netdev_priv(dev);
1192         struct macb_queue *queue;
1193         unsigned long flags;
1194         unsigned int q;
1195 
1196         local_irq_save(flags);
1197         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1198                 macb_interrupt(dev->irq, queue);
1199         local_irq_restore(flags);
1200 }
1201 #endif
1202 
1203 static unsigned int macb_tx_map(struct macb *bp,
1204                                 struct macb_queue *queue,
1205                                 struct sk_buff *skb)
1206 {
1207         dma_addr_t mapping;
1208         unsigned int len, entry, i, tx_head = queue->tx_head;
1209         struct macb_tx_skb *tx_skb = NULL;
1210         struct macb_dma_desc *desc;
1211         unsigned int offset, size, count = 0;
1212         unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1213         unsigned int eof = 1;
1214         u32 ctrl;
1215 
1216         /* First, map non-paged data */
1217         len = skb_headlen(skb);
1218         offset = 0;
1219         while (len) {
1220                 size = min(len, bp->max_tx_length);
1221                 entry = macb_tx_ring_wrap(tx_head);
1222                 tx_skb = &queue->tx_skb[entry];
1223 
1224                 mapping = dma_map_single(&bp->pdev->dev,
1225                                          skb->data + offset,
1226                                          size, DMA_TO_DEVICE);
1227                 if (dma_mapping_error(&bp->pdev->dev, mapping))
1228                         goto dma_error;
1229 
1230                 /* Save info to properly release resources */
1231                 tx_skb->skb = NULL;
1232                 tx_skb->mapping = mapping;
1233                 tx_skb->size = size;
1234                 tx_skb->mapped_as_page = false;
1235 
1236                 len -= size;
1237                 offset += size;
1238                 count++;
1239                 tx_head++;
1240         }
1241 
1242         /* Then, map paged data from fragments */
1243         for (f = 0; f < nr_frags; f++) {
1244                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1245 
1246                 len = skb_frag_size(frag);
1247                 offset = 0;
1248                 while (len) {
1249                         size = min(len, bp->max_tx_length);
1250                         entry = macb_tx_ring_wrap(tx_head);
1251                         tx_skb = &queue->tx_skb[entry];
1252 
1253                         mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1254                                                    offset, size, DMA_TO_DEVICE);
1255                         if (dma_mapping_error(&bp->pdev->dev, mapping))
1256                                 goto dma_error;
1257 
1258                         /* Save info to properly release resources */
1259                         tx_skb->skb = NULL;
1260                         tx_skb->mapping = mapping;
1261                         tx_skb->size = size;
1262                         tx_skb->mapped_as_page = true;
1263 
1264                         len -= size;
1265                         offset += size;
1266                         count++;
1267                         tx_head++;
1268                 }
1269         }
1270 
1271         /* Should never happen */
1272         if (unlikely(tx_skb == NULL)) {
1273                 netdev_err(bp->dev, "BUG! empty skb!\n");
1274                 return 0;
1275         }
1276 
1277         /* This is the last buffer of the frame: save socket buffer */
1278         tx_skb->skb = skb;
1279 
1280         /* Update TX ring: update buffer descriptors in reverse order
1281          * to avoid race condition
1282          */
1283 
1284         /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1285          * to set the end of TX queue
1286          */
1287         i = tx_head;
1288         entry = macb_tx_ring_wrap(i);
1289         ctrl = MACB_BIT(TX_USED);
1290         desc = &queue->tx_ring[entry];
1291         desc->ctrl = ctrl;
1292 
1293         do {
1294                 i--;
1295                 entry = macb_tx_ring_wrap(i);
1296                 tx_skb = &queue->tx_skb[entry];
1297                 desc = &queue->tx_ring[entry];
1298 
1299                 ctrl = (u32)tx_skb->size;
1300                 if (eof) {
1301                         ctrl |= MACB_BIT(TX_LAST);
1302                         eof = 0;
1303                 }
1304                 if (unlikely(entry == (TX_RING_SIZE - 1)))
1305                         ctrl |= MACB_BIT(TX_WRAP);
1306 
1307                 /* Set TX buffer descriptor */
1308                 desc->addr = tx_skb->mapping;
1309                 /* desc->addr must be visible to hardware before clearing
1310                  * 'TX_USED' bit in desc->ctrl.
1311                  */
1312                 wmb();
1313                 desc->ctrl = ctrl;
1314         } while (i != queue->tx_head);
1315 
1316         queue->tx_head = tx_head;
1317 
1318         return count;
1319 
1320 dma_error:
1321         netdev_err(bp->dev, "TX DMA map failed\n");
1322 
1323         for (i = queue->tx_head; i != tx_head; i++) {
1324                 tx_skb = macb_tx_skb(queue, i);
1325 
1326                 macb_tx_unmap(bp, tx_skb);
1327         }
1328 
1329         return 0;
1330 }
1331 
1332 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1333 {
1334         u16 queue_index = skb_get_queue_mapping(skb);
1335         struct macb *bp = netdev_priv(dev);
1336         struct macb_queue *queue = &bp->queues[queue_index];
1337         unsigned long flags;
1338         unsigned int count, nr_frags, frag_size, f;
1339 
1340 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1341         netdev_vdbg(bp->dev,
1342                    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1343                    queue_index, skb->len, skb->head, skb->data,
1344                    skb_tail_pointer(skb), skb_end_pointer(skb));
1345         print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1346                        skb->data, 16, true);
1347 #endif
1348 
1349         /* Count how many TX buffer descriptors are needed to send this
1350          * socket buffer: skb fragments of jumbo frames may need to be
1351          * split into many buffer descriptors.
1352          */
1353         count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1354         nr_frags = skb_shinfo(skb)->nr_frags;
1355         for (f = 0; f < nr_frags; f++) {
1356                 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1357                 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1358         }
1359 
1360         spin_lock_irqsave(&bp->lock, flags);
1361 
1362         /* This is a hard error, log it. */
1363         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
1364                 netif_stop_subqueue(dev, queue_index);
1365                 spin_unlock_irqrestore(&bp->lock, flags);
1366                 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1367                            queue->tx_head, queue->tx_tail);
1368                 return NETDEV_TX_BUSY;
1369         }
1370 
1371         /* Map socket buffer for DMA transfer */
1372         if (!macb_tx_map(bp, queue, skb)) {
1373                 dev_kfree_skb_any(skb);
1374                 goto unlock;
1375         }
1376 
1377         /* Make newly initialized descriptor visible to hardware */
1378         wmb();
1379 
1380         skb_tx_timestamp(skb);
1381 
1382         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1383 
1384         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
1385                 netif_stop_subqueue(dev, queue_index);
1386 
1387 unlock:
1388         spin_unlock_irqrestore(&bp->lock, flags);
1389 
1390         return NETDEV_TX_OK;
1391 }
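     /* As an illustration of the descriptor accounting above: if, say,
      * bp->max_tx_length were 2047 bytes, a 3000-byte linear area would
      * need DIV_ROUND_UP(3000, 2047) = 2 descriptors, plus one or more per
      * fragment; the ring must have room for the whole frame up front.
      */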
1392 
1393 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1394 {
1395         if (!macb_is_gem(bp)) {
1396                 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1397         } else {
1398                 bp->rx_buffer_size = size;
1399 
1400                 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1401                         netdev_dbg(bp->dev,
1402                                     "RX buffer must be multiple of %d bytes, expanding\n",
1403                                     RX_BUFFER_MULTIPLE);
1404                         bp->rx_buffer_size =
1405                                 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1406                 }
1407         }
1408 
1409         netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1410                    bp->dev->mtu, bp->rx_buffer_size);
1411 }
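     /* As an example of the rounding above: on a GEM, a requested buffer
      * size of 1522 bytes would be expanded to 1536, the next multiple of
      * RX_BUFFER_MULTIPLE (64).
      */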
1412 
1413 static void gem_free_rx_buffers(struct macb *bp)
1414 {
1415         struct sk_buff          *skb;
1416         struct macb_dma_desc    *desc;
1417         dma_addr_t              addr;
1418         int i;
1419 
1420         if (!bp->rx_skbuff)
1421                 return;
1422 
1423         for (i = 0; i < RX_RING_SIZE; i++) {
1424                 skb = bp->rx_skbuff[i];
1425 
1426                 if (skb == NULL)
1427                         continue;
1428 
1429                 desc = &bp->rx_ring[i];
1430                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1431                 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1432                                  DMA_FROM_DEVICE);
1433                 dev_kfree_skb_any(skb);
1434                 skb = NULL;
1435         }
1436 
1437         kfree(bp->rx_skbuff);
1438         bp->rx_skbuff = NULL;
1439 }
1440 
1441 static void macb_free_rx_buffers(struct macb *bp)
1442 {
1443         if (bp->rx_buffers) {
1444                 dma_free_coherent(&bp->pdev->dev,
1445                                   RX_RING_SIZE * bp->rx_buffer_size,
1446                                   bp->rx_buffers, bp->rx_buffers_dma);
1447                 bp->rx_buffers = NULL;
1448         }
1449 }
1450 
1451 static void macb_free_consistent(struct macb *bp)
1452 {
1453         struct macb_queue *queue;
1454         unsigned int q;
1455 
1456         bp->macbgem_ops.mog_free_rx_buffers(bp);
1457         if (bp->rx_ring) {
1458                 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1459                                   bp->rx_ring, bp->rx_ring_dma);
1460                 bp->rx_ring = NULL;
1461         }
1462 
1463         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1464                 kfree(queue->tx_skb);
1465                 queue->tx_skb = NULL;
1466                 if (queue->tx_ring) {
1467                         dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1468                                           queue->tx_ring, queue->tx_ring_dma);
1469                         queue->tx_ring = NULL;
1470                 }
1471         }
1472 }
1473 
1474 static int gem_alloc_rx_buffers(struct macb *bp)
1475 {
1476         int size;
1477 
1478         size = RX_RING_SIZE * sizeof(struct sk_buff *);
1479         bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1480         if (!bp->rx_skbuff)
1481                 return -ENOMEM;
1482 
1483         netdev_dbg(bp->dev,
1484                    "Allocated %d RX struct sk_buff entries at %p\n",
1485                    RX_RING_SIZE, bp->rx_skbuff);
1486         return 0;
1487 }
1488 
1489 static int macb_alloc_rx_buffers(struct macb *bp)
1490 {
1491         int size;
1492 
1493         size = RX_RING_SIZE * bp->rx_buffer_size;
1494         bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1495                                             &bp->rx_buffers_dma, GFP_KERNEL);
1496         if (!bp->rx_buffers)
1497                 return -ENOMEM;
1498 
1499         netdev_dbg(bp->dev,
1500                    "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1501                    size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1502         return 0;
1503 }
1504 
1505 static int macb_alloc_consistent(struct macb *bp)
1506 {
1507         struct macb_queue *queue;
1508         unsigned int q;
1509         int size;
1510 
1511         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1512                 size = TX_RING_BYTES;
1513                 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1514                                                     &queue->tx_ring_dma,
1515                                                     GFP_KERNEL);
1516                 if (!queue->tx_ring)
1517                         goto out_err;
1518                 netdev_dbg(bp->dev,
1519                            "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1520                            q, size, (unsigned long)queue->tx_ring_dma,
1521                            queue->tx_ring);
1522 
1523                 size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
1524                 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1525                 if (!queue->tx_skb)
1526                         goto out_err;
1527         }
1528 
1529         size = RX_RING_BYTES;
1530         bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1531                                          &bp->rx_ring_dma, GFP_KERNEL);
1532         if (!bp->rx_ring)
1533                 goto out_err;
1534         netdev_dbg(bp->dev,
1535                    "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1536                    size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1537 
1538         if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1539                 goto out_err;
1540 
1541         return 0;
1542 
1543 out_err:
1544         macb_free_consistent(bp);
1545         return -ENOMEM;
1546 }
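
     /* Editor's note on the allocation sizes above, assuming the two-word
      * (8-byte) struct macb_dma_desc from macb.h:
      *
      *     TX_RING_BYTES  = 8 * 128   =  1 KiB of descriptors per queue
      *     RX_RING_BYTES  = 8 * 512   =  4 KiB of descriptors
      *     macb RX data   = 512 * 128 = 64 KiB coherent buffer
      *
      * GEM skips the big coherent data block: gem_alloc_rx_buffers() only
      * allocates the 512-entry rx_skbuff pointer array, and gem_rx_refill()
      * maps one skb per descriptor on demand.
      */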
1547 
1548 static void gem_init_rings(struct macb *bp)
1549 {
1550         struct macb_queue *queue;
1551         unsigned int q;
1552         int i;
1553 
1554         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1555                 for (i = 0; i < TX_RING_SIZE; i++) {
1556                         queue->tx_ring[i].addr = 0;
1557                         queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1558                 }
1559                 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1560                 queue->tx_head = 0;
1561                 queue->tx_tail = 0;
1562         }
1563 
1564         bp->rx_tail = 0;
1565         bp->rx_prepared_head = 0;
1566 
1567         gem_rx_refill(bp);
1568 }
1569 
1570 static void macb_init_rings(struct macb *bp)
1571 {
1572         int i;
1573 
1574         macb_init_rx_ring(bp);
1575 
1576         for (i = 0; i < TX_RING_SIZE; i++) {
1577                 bp->queues[0].tx_ring[i].addr = 0;
1578                 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
1579         }
1580         bp->queues[0].tx_head = 0;
1581         bp->queues[0].tx_tail = 0;
1582         bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1583 
1584         bp->rx_tail = 0;
1585 }
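
     /* Editor's note: both init paths above hand every TX descriptor back to
      * software (TX_USED set), so the controller finds nothing to transmit
      * until macb_start_xmit() clears the bit, and TX_WRAP on the final
      * descriptor closes the ring in hardware rather than in software.
      */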
1586 
1587 static void macb_reset_hw(struct macb *bp)
1588 {
1589         struct macb_queue *queue;
1590         unsigned int q;
1591 
1592         /*
1593          * Disable RX and TX (XXX: Should we halt the transmission
1594          * more gracefully?)
1595          */
1596         macb_writel(bp, NCR, 0);
1597 
1598         /* Clear the stats registers (XXX: Update stats first?) */
1599         macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1600 
1601         /* Clear all status flags */
1602         macb_writel(bp, TSR, -1);
1603         macb_writel(bp, RSR, -1);
1604 
1605         /* Disable all interrupts */
1606         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1607                 queue_writel(queue, IDR, -1);
1608                 queue_readl(queue, ISR);
1609                 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1610                         queue_writel(queue, ISR, -1);
1611         }
1612 }
1613 
1614 static u32 gem_mdc_clk_div(struct macb *bp)
1615 {
1616         u32 config;
1617         unsigned long pclk_hz = clk_get_rate(bp->pclk);
1618 
1619         if (pclk_hz <= 20000000)
1620                 config = GEM_BF(CLK, GEM_CLK_DIV8);
1621         else if (pclk_hz <= 40000000)
1622                 config = GEM_BF(CLK, GEM_CLK_DIV16);
1623         else if (pclk_hz <= 80000000)
1624                 config = GEM_BF(CLK, GEM_CLK_DIV32);
1625         else if (pclk_hz <= 120000000)
1626                 config = GEM_BF(CLK, GEM_CLK_DIV48);
1627         else if (pclk_hz <= 160000000)
1628                 config = GEM_BF(CLK, GEM_CLK_DIV64);
1629         else
1630                 config = GEM_BF(CLK, GEM_CLK_DIV96);
1631 
1632         return config;
1633 }
1634 
1635 static u32 macb_mdc_clk_div(struct macb *bp)
1636 {
1637         u32 config;
1638         unsigned long pclk_hz;
1639 
1640         if (macb_is_gem(bp))
1641                 return gem_mdc_clk_div(bp);
1642 
1643         pclk_hz = clk_get_rate(bp->pclk);
1644         if (pclk_hz <= 20000000)
1645                 config = MACB_BF(CLK, MACB_CLK_DIV8);
1646         else if (pclk_hz <= 40000000)
1647                 config = MACB_BF(CLK, MACB_CLK_DIV16);
1648         else if (pclk_hz <= 80000000)
1649                 config = MACB_BF(CLK, MACB_CLK_DIV32);
1650         else
1651                 config = MACB_BF(CLK, MACB_CLK_DIV64);
1652 
1653         return config;
1654 }
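
     /* Editor's sketch of the divider choice: IEEE 802.3 caps MDC at
      * 2.5 MHz, so the smallest divider that keeps pclk/div under the limit
      * is selected.  For example, a 133 MHz pclk on GEM falls in the
      * <= 160 MHz bucket:
      *
      *     GEM_CLK_DIV64  ->  MDC = 133 MHz / 64 ~= 2.08 MHz
      *
      * and the same pclk on plain MACB falls through to MACB_CLK_DIV64.
      */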
1655 
1656 /*
1657  * Get the DMA bus width field of the network configuration register that we
1658  * should program. We find the width by decoding the design configuration
1659  * register, which reports the maximum supported data bus width.
1660  */
1661 static u32 macb_dbw(struct macb *bp)
1662 {
1663         if (!macb_is_gem(bp))
1664                 return 0;
1665 
1666         switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1667         case 4:
1668                 return GEM_BF(DBW, GEM_DBW128);
1669         case 2:
1670                 return GEM_BF(DBW, GEM_DBW64);
1671         case 1:
1672         default:
1673                 return GEM_BF(DBW, GEM_DBW32);
1674         }
1675 }
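
     /* Editor's note: DBWDEF encodes the synthesized bus width in units of
      * 32 bits (4 -> 128-bit, 2 -> 64-bit, 1 -> 32-bit), and unrecognized
      * values fall back to the always-safe 32-bit setting.
      */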
1676 
1677 /*
1678  * Configure the receive DMA engine
1679  * - use the correct receive buffer size
1680  * - set best burst length for DMA operations
1681  *   (if not supported by the FIFO, it will fall back to the default)
1682  * - set both rx/tx packet buffers to full memory size
1683  * These are configurable parameters for GEM.
1684  */
1685 static void macb_configure_dma(struct macb *bp)
1686 {
1687         u32 dmacfg;
1688 
1689         if (macb_is_gem(bp)) {
1690                 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1691                 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1692                 if (bp->dma_burst_length)
1693                         dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
1694                 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1695                 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1696 
1697                 if (bp->native_io)
1698                         dmacfg &= ~GEM_BIT(ENDIA_DESC);
1699                 else
1700                         dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1701 
1702                 if (bp->dev->features & NETIF_F_HW_CSUM)
1703                         dmacfg |= GEM_BIT(TXCOEN);
1704                 else
1705                         dmacfg &= ~GEM_BIT(TXCOEN);
1706                 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1707                            dmacfg);
1708                 gem_writel(bp, DMACFG, dmacfg);
1709         }
1710 }
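
     /* Editor's worked example: with a 1536-byte rx_buffer_size, the RXBS
      * field above is written as 1536 / RX_BUFFER_MULTIPLE = 24, i.e. GEM
      * counts receive buffer space in 64-byte units.  The per-SoC burst
      * length (16 on e.g. the pc302/zynq configs below) lands in FBLDO, and
      * TXCOEN tracks NETIF_F_HW_CSUM so checksum insertion is only enabled
      * when the stack may hand over partial checksums.
      */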
1711 
1712 static void macb_init_hw(struct macb *bp)
1713 {
1714         struct macb_queue *queue;
1715         unsigned int q;
1716 
1717         u32 config;
1718 
1719         macb_reset_hw(bp);
1720         macb_set_hwaddr(bp);
1721 
1722         config = macb_mdc_clk_div(bp);
1723         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1724                 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
1725         config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
1726         config |= MACB_BIT(PAE);                /* PAuse Enable */
1727         config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
1728         if (bp->caps & MACB_CAPS_JUMBO)
1729                 config |= MACB_BIT(JFRAME);     /* Enable jumbo frames */
1730         else
1731                 config |= MACB_BIT(BIG);        /* Receive oversized frames */
1732         if (bp->dev->flags & IFF_PROMISC)
1733                 config |= MACB_BIT(CAF);        /* Copy All Frames */
1734         else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1735                 config |= GEM_BIT(RXCOEN);
1736         if (!(bp->dev->flags & IFF_BROADCAST))
1737                 config |= MACB_BIT(NBC);        /* No BroadCast */
1738         config |= macb_dbw(bp);
1739         macb_writel(bp, NCFGR, config);
1740         if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
1741                 gem_writel(bp, JML, bp->jumbo_max_len);
1742         bp->speed = SPEED_10;
1743         bp->duplex = DUPLEX_HALF;
1744         bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
1745         if (bp->caps & MACB_CAPS_JUMBO)
1746                 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
1747 
1748         macb_configure_dma(bp);
1749 
1750         /* Initialize TX and RX buffers */
1751         macb_writel(bp, RBQP, bp->rx_ring_dma);
1752         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1753                 queue_writel(queue, TBQP, queue->tx_ring_dma);
1754 
1755                 /* Enable interrupts */
1756                 queue_writel(queue, IER,
1757                              MACB_RX_INT_FLAGS |
1758                              MACB_TX_INT_FLAGS |
1759                              MACB_BIT(HRESP));
1760         }
1761 
1762         /* Enable TX and RX */
1763         macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1764 }
1765 
1766 /*
1767  * The hash address register is 64 bits long and takes up two
1768  * locations in the memory map.  The least significant bits are stored
1769  * in EMAC_HSL and the most significant bits in EMAC_HSH.
1770  *
1771  * The unicast hash enable and the multicast hash enable bits in the
1772  * network configuration register enable the reception of hash matched
1773  * frames. The destination address is reduced to a 6 bit index into
1774  * the 64 bit hash register using the following hash function.  The
1775  * hash function is an exclusive or of every sixth bit of the
1776  * destination address.
1777  *
1778  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
1779  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
1780  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
1781  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
1782  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
1783  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
1784  *
1785  * da[0] represents the least significant bit of the first byte
1786  * received, that is, the multicast/unicast indicator, and da[47]
1787  * represents the most significant bit of the last byte received.  If
1788  * the hash index, hi[n], points to a bit that is set in the hash
1789  * register then the frame will be matched according to whether the
1790  * frame is multicast or unicast.  A multicast match will be signalled
1791  * if the multicast hash enable bit is set, da[0] is 1 and the hash
1792  * index points to a bit set in the hash register.  A unicast match
1793  * will be signalled if the unicast hash enable bit is set, da[0] is 0
1794  * and the hash index points to a bit set in the hash register.  To
1795  * receive all multicast frames, the hash register should be set with
1796  * all ones and the multicast hash enable bit should be set in the
1797  * network configuration register.
1798  */
1799 
1800 static inline int hash_bit_value(int bitnr, __u8 *addr)
1801 {
1802         if (addr[bitnr / 8] & (1 << (bitnr % 8)))
1803                 return 1;
1804         return 0;
1805 }
1806 
1807 /*
1808  * Return the hash index value for the specified address.
1809  */
1810 static int hash_get_index(__u8 *addr)
1811 {
1812         int i, j, bitval;
1813         int hash_index = 0;
1814 
1815         for (j = 0; j < 6; j++) {
1816                 for (i = 0, bitval = 0; i < 8; i++)
1817                         bitval ^= hash_bit_value(i * 6 + j, addr);
1818 
1819                 hash_index |= (bitval << j);
1820         }
1821 
1822         return hash_index;
1823 }
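
     /* Editor's worked example (hand-computed, not from a datasheet): for
      * the IPv6 all-nodes multicast MAC 33:33:00:00:00:01, da[0..7] and
      * da[8..15] are the bits of 0x33 and da[40] is the only bit set in the
      * last byte.  The six XOR columns then give
      *
      *     hi[5..0] = 1 0 1 1 0 0  ->  hash_get_index() == 44,
      *
      * so macb_sethashtable() below sets bit (44 & 31) = 12 of
      * mc_filter[44 >> 5] = mc_filter[1], which ends up in the top hash
      * register (HRT).
      */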
1824 
1825 /*
1826  * Add multicast addresses to the internal multicast-hash table.
1827  */
1828 static void macb_sethashtable(struct net_device *dev)
1829 {
1830         struct netdev_hw_addr *ha;
1831         unsigned long mc_filter[2];
1832         unsigned int bitnr;
1833         struct macb *bp = netdev_priv(dev);
1834 
1835         mc_filter[0] = mc_filter[1] = 0;
1836 
1837         netdev_for_each_mc_addr(ha, dev) {
1838                 bitnr = hash_get_index(ha->addr);
1839                 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1840         }
1841 
1842         macb_or_gem_writel(bp, HRB, mc_filter[0]);
1843         macb_or_gem_writel(bp, HRT, mc_filter[1]);
1844 }
1845 
1846 /*
1847  * Enable/Disable promiscuous and multicast modes.
1848  */
1849 static void macb_set_rx_mode(struct net_device *dev)
1850 {
1851         unsigned long cfg;
1852         struct macb *bp = netdev_priv(dev);
1853 
1854         cfg = macb_readl(bp, NCFGR);
1855 
1856         if (dev->flags & IFF_PROMISC) {
1857                 /* Enable promiscuous mode */
1858                 cfg |= MACB_BIT(CAF);
1859 
1860                 /* Disable RX checksum offload */
1861                 if (macb_is_gem(bp))
1862                         cfg &= ~GEM_BIT(RXCOEN);
1863         } else {
1864                 /* Disable promiscuous mode */
1865                 cfg &= ~MACB_BIT(CAF);
1866 
1867                 /* Enable RX checksum offload only if requested */
1868                 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
1869                         cfg |= GEM_BIT(RXCOEN);
1870         }
1871 
1872         if (dev->flags & IFF_ALLMULTI) {
1873                 /* Enable all multicast mode */
1874                 macb_or_gem_writel(bp, HRB, -1);
1875                 macb_or_gem_writel(bp, HRT, -1);
1876                 cfg |= MACB_BIT(NCFGR_MTI);
1877         } else if (!netdev_mc_empty(dev)) {
1878                 /* Enable specific multicasts */
1879                 macb_sethashtable(dev);
1880                 cfg |= MACB_BIT(NCFGR_MTI);
1881         } else if (dev->flags & (~IFF_ALLMULTI)) {
1882                 /* Disable all multicast mode */
1883                 macb_or_gem_writel(bp, HRB, 0);
1884                 macb_or_gem_writel(bp, HRT, 0);
1885                 cfg &= ~MACB_BIT(NCFGR_MTI);
1886         }
1887 
1888         macb_writel(bp, NCFGR, cfg);
1889 }
1890 
1891 static int macb_open(struct net_device *dev)
1892 {
1893         struct macb *bp = netdev_priv(dev);
1894         size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
1895         int err;
1896 
1897         netdev_dbg(bp->dev, "open\n");
1898 
1899         /* carrier starts down */
1900         netif_carrier_off(dev);
1901 
1902         /* if the phy is not yet registered, retry later */
1903         if (!bp->phy_dev)
1904                 return -EAGAIN;
1905 
1906         /* RX buffers initialization */
1907         macb_init_rx_buffer_size(bp, bufsz);
1908 
1909         err = macb_alloc_consistent(bp);
1910         if (err) {
1911                 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
1912                            err);
1913                 return err;
1914         }
1915 
1916         napi_enable(&bp->napi);
1917 
1918         bp->macbgem_ops.mog_init_rings(bp);
1919         macb_init_hw(bp);
1920 
1921         /* schedule a link state check */
1922         phy_start(bp->phy_dev);
1923 
1924         netif_tx_start_all_queues(dev);
1925 
1926         return 0;
1927 }
1928 
1929 static int macb_close(struct net_device *dev)
1930 {
1931         struct macb *bp = netdev_priv(dev);
1932         unsigned long flags;
1933 
1934         netif_tx_stop_all_queues(dev);
1935         napi_disable(&bp->napi);
1936 
1937         if (bp->phy_dev)
1938                 phy_stop(bp->phy_dev);
1939 
1940         spin_lock_irqsave(&bp->lock, flags);
1941         macb_reset_hw(bp);
1942         netif_carrier_off(dev);
1943         spin_unlock_irqrestore(&bp->lock, flags);
1944 
1945         macb_free_consistent(bp);
1946 
1947         return 0;
1948 }
1949 
1950 static int macb_change_mtu(struct net_device *dev, int new_mtu)
1951 {
1952         struct macb *bp = netdev_priv(dev);
1953         u32 max_mtu;
1954 
1955         if (netif_running(dev))
1956                 return -EBUSY;
1957 
1958         max_mtu = ETH_DATA_LEN;
1959         if (bp->caps & MACB_CAPS_JUMBO)
1960                 max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
1961 
1962         if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
1963                 return -EINVAL;
1964 
1965         dev->mtu = new_mtu;
1966 
1967         return 0;
1968 }
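
     /* Editor's example of the bounds above: a jumbo-capable GEM programmed
      * with the zynqmp default JML of 10240 accepts MTUs up to
      * 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222 bytes, while anything
      * below GEM_MTU_MIN_SIZE is rejected.  The netif_running() check means
      * the interface must be brought down first, because the RX buffers
      * sized in macb_open() depend on the MTU.
      */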
1969 
1970 static void gem_update_stats(struct macb *bp)
1971 {
1972         unsigned int i;
1973         u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1974 
1975         for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1976                 u32 offset = gem_statistics[i].offset;
1977                 u64 val = bp->macb_reg_readl(bp, offset);
1978 
1979                 bp->ethtool_stats[i] += val;
1980                 *p += val;
1981 
1982                 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1983                         /* Add GEM_OCTTXH, GEM_OCTRXH */
1984                         val = bp->macb_reg_readl(bp, offset + 4);
1985                         bp->ethtool_stats[i] += ((u64)val) << 32;
1986                         *(++p) += val;
1987                 }
1988         }
1989 }
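
     /* Editor's note: the octet counters are the only 64-bit statistics and
      * are split across two registers.  The low half sits at
      * GEM_OCTTXL/GEM_OCTRXL and the high half one word later (offset + 4),
      * so the 64-bit ethtool value is assembled as
      *
      *     ethtool_stats[i] = ((u64)hi << 32) | lo;
      *
      * while the extra *(++p) += val step accumulates the high half into the
      * second u32 slot of hw_stats.gem.
      */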
1990 
1991 static struct net_device_stats *gem_get_stats(struct macb *bp)
1992 {
1993         struct gem_stats *hwstat = &bp->hw_stats.gem;
1994         struct net_device_stats *nstat = &bp->stats;
1995 
1996         gem_update_stats(bp);
1997 
1998         nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
1999                             hwstat->rx_alignment_errors +
2000                             hwstat->rx_resource_errors +
2001                             hwstat->rx_overruns +
2002                             hwstat->rx_oversize_frames +
2003                             hwstat->rx_jabbers +
2004                             hwstat->rx_undersized_frames +
2005                             hwstat->rx_length_field_frame_errors);
2006         nstat->tx_errors = (hwstat->tx_late_collisions +
2007                             hwstat->tx_excessive_collisions +
2008                             hwstat->tx_underrun +
2009                             hwstat->tx_carrier_sense_errors);
2010         nstat->multicast = hwstat->rx_multicast_frames;
2011         nstat->collisions = (hwstat->tx_single_collision_frames +
2012                              hwstat->tx_multiple_collision_frames +
2013                              hwstat->tx_excessive_collisions);
2014         nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2015                                    hwstat->rx_jabbers +
2016                                    hwstat->rx_undersized_frames +
2017                                    hwstat->rx_length_field_frame_errors);
2018         nstat->rx_over_errors = hwstat->rx_resource_errors;
2019         nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2020         nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2021         nstat->rx_fifo_errors = hwstat->rx_overruns;
2022         nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2023         nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2024         nstat->tx_fifo_errors = hwstat->tx_underrun;
2025 
2026         return nstat;
2027 }
2028 
2029 static void gem_get_ethtool_stats(struct net_device *dev,
2030                                   struct ethtool_stats *stats, u64 *data)
2031 {
2032         struct macb *bp;
2033 
2034         bp = netdev_priv(dev);
2035         gem_update_stats(bp);
2036         memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
2037 }
2038 
2039 static int gem_get_sset_count(struct net_device *dev, int sset)
2040 {
2041         switch (sset) {
2042         case ETH_SS_STATS:
2043                 return GEM_STATS_LEN;
2044         default:
2045                 return -EOPNOTSUPP;
2046         }
2047 }
2048 
2049 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2050 {
2051         unsigned int i;
2052 
2053         switch (sset) {
2054         case ETH_SS_STATS:
2055                 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2056                         memcpy(p, gem_statistics[i].stat_string,
2057                                ETH_GSTRING_LEN);
2058                 break;
2059         }
2060 }
2061 
2062 static struct net_device_stats *macb_get_stats(struct net_device *dev)
2063 {
2064         struct macb *bp = netdev_priv(dev);
2065         struct net_device_stats *nstat = &bp->stats;
2066         struct macb_stats *hwstat = &bp->hw_stats.macb;
2067 
2068         if (macb_is_gem(bp))
2069                 return gem_get_stats(bp);
2070 
2071         /* read stats from hardware */
2072         macb_update_stats(bp);
2073 
2074         /* Convert HW stats into netdevice stats */
2075         nstat->rx_errors = (hwstat->rx_fcs_errors +
2076                             hwstat->rx_align_errors +
2077                             hwstat->rx_resource_errors +
2078                             hwstat->rx_overruns +
2079                             hwstat->rx_oversize_pkts +
2080                             hwstat->rx_jabbers +
2081                             hwstat->rx_undersize_pkts +
2082                             hwstat->rx_length_mismatch);
2083         nstat->tx_errors = (hwstat->tx_late_cols +
2084                             hwstat->tx_excessive_cols +
2085                             hwstat->tx_underruns +
2086                             hwstat->tx_carrier_errors +
2087                             hwstat->sqe_test_errors);
2088         nstat->collisions = (hwstat->tx_single_cols +
2089                              hwstat->tx_multiple_cols +
2090                              hwstat->tx_excessive_cols);
2091         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2092                                    hwstat->rx_jabbers +
2093                                    hwstat->rx_undersize_pkts +
2094                                    hwstat->rx_length_mismatch);
2095         nstat->rx_over_errors = hwstat->rx_resource_errors +
2096                                    hwstat->rx_overruns;
2097         nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2098         nstat->rx_frame_errors = hwstat->rx_align_errors;
2099         nstat->rx_fifo_errors = hwstat->rx_overruns;
2100         /* XXX: What does "missed" mean? */
2101         nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2102         nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2103         nstat->tx_fifo_errors = hwstat->tx_underruns;
2104         /* Don't know about heartbeat or window errors... */
2105 
2106         return nstat;
2107 }
2108 
2109 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2110 {
2111         struct macb *bp = netdev_priv(dev);
2112         struct phy_device *phydev = bp->phy_dev;
2113 
2114         if (!phydev)
2115                 return -ENODEV;
2116 
2117         return phy_ethtool_gset(phydev, cmd);
2118 }
2119 
2120 static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2121 {
2122         struct macb *bp = netdev_priv(dev);
2123         struct phy_device *phydev = bp->phy_dev;
2124 
2125         if (!phydev)
2126                 return -ENODEV;
2127 
2128         return phy_ethtool_sset(phydev, cmd);
2129 }
2130 
2131 static int macb_get_regs_len(struct net_device *netdev)
2132 {
2133         return MACB_GREGS_NBR * sizeof(u32);
2134 }
2135 
2136 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2137                           void *p)
2138 {
2139         struct macb *bp = netdev_priv(dev);
2140         unsigned int tail, head;
2141         u32 *regs_buff = p;
2142 
2143         regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2144                         | MACB_GREGS_VERSION;
2145 
2146         tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
2147         head = macb_tx_ring_wrap(bp->queues[0].tx_head);
2148 
2149         regs_buff[0]  = macb_readl(bp, NCR);
2150         regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
2151         regs_buff[2]  = macb_readl(bp, NSR);
2152         regs_buff[3]  = macb_readl(bp, TSR);
2153         regs_buff[4]  = macb_readl(bp, RBQP);
2154         regs_buff[5]  = macb_readl(bp, TBQP);
2155         regs_buff[6]  = macb_readl(bp, RSR);
2156         regs_buff[7]  = macb_readl(bp, IMR);
2157 
2158         regs_buff[8]  = tail;
2159         regs_buff[9]  = head;
2160         regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2161         regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2162 
2163         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2164                 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2165         if (macb_is_gem(bp)) {
2166                 regs_buff[13] = gem_readl(bp, DMACFG);
2167         }
2168 }
2169 
2170 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2171 {
2172         struct macb *bp = netdev_priv(netdev);
2173 
2174         wol->supported = 0;
2175         wol->wolopts = 0;
2176 
2177         if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2178                 wol->supported = WAKE_MAGIC;
2179 
2180                 if (bp->wol & MACB_WOL_ENABLED)
2181                         wol->wolopts |= WAKE_MAGIC;
2182         }
2183 }
2184 
2185 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2186 {
2187         struct macb *bp = netdev_priv(netdev);
2188 
2189         if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2190             (wol->wolopts & ~WAKE_MAGIC))
2191                 return -EOPNOTSUPP;
2192 
2193         if (wol->wolopts & WAKE_MAGIC)
2194                 bp->wol |= MACB_WOL_ENABLED;
2195         else
2196                 bp->wol &= ~MACB_WOL_ENABLED;
2197 
2198         device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2199 
2200         return 0;
2201 }
2202 
2203 static const struct ethtool_ops macb_ethtool_ops = {
2204         .get_settings           = macb_get_settings,
2205         .set_settings           = macb_set_settings,
2206         .get_regs_len           = macb_get_regs_len,
2207         .get_regs               = macb_get_regs,
2208         .get_link               = ethtool_op_get_link,
2209         .get_ts_info            = ethtool_op_get_ts_info,
2210         .get_wol                = macb_get_wol,
2211         .set_wol                = macb_set_wol,
2212 };
2213 
2214 static const struct ethtool_ops gem_ethtool_ops = {
2215         .get_settings           = macb_get_settings,
2216         .set_settings           = macb_set_settings,
2217         .get_regs_len           = macb_get_regs_len,
2218         .get_regs               = macb_get_regs,
2219         .get_link               = ethtool_op_get_link,
2220         .get_ts_info            = ethtool_op_get_ts_info,
2221         .get_ethtool_stats      = gem_get_ethtool_stats,
2222         .get_strings            = gem_get_ethtool_strings,
2223         .get_sset_count         = gem_get_sset_count,
2224 };
2225 
2226 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2227 {
2228         struct macb *bp = netdev_priv(dev);
2229         struct phy_device *phydev = bp->phy_dev;
2230 
2231         if (!netif_running(dev))
2232                 return -EINVAL;
2233 
2234         if (!phydev)
2235                 return -ENODEV;
2236 
2237         return phy_mii_ioctl(phydev, rq, cmd);
2238 }
2239 
2240 static int macb_set_features(struct net_device *netdev,
2241                              netdev_features_t features)
2242 {
2243         struct macb *bp = netdev_priv(netdev);
2244         netdev_features_t changed = features ^ netdev->features;
2245 
2246         /* TX checksum offload */
2247         if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2248                 u32 dmacfg;
2249 
2250                 dmacfg = gem_readl(bp, DMACFG);
2251                 if (features & NETIF_F_HW_CSUM)
2252                         dmacfg |= GEM_BIT(TXCOEN);
2253                 else
2254                         dmacfg &= ~GEM_BIT(TXCOEN);
2255                 gem_writel(bp, DMACFG, dmacfg);
2256         }
2257 
2258         /* RX checksum offload */
2259         if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2260                 u32 netcfg;
2261 
2262                 netcfg = gem_readl(bp, NCFGR);
2263                 if (features & NETIF_F_RXCSUM &&
2264                     !(netdev->flags & IFF_PROMISC))
2265                         netcfg |= GEM_BIT(RXCOEN);
2266                 else
2267                         netcfg &= ~GEM_BIT(RXCOEN);
2268                 gem_writel(bp, NCFGR, netcfg);
2269         }
2270 
2271         return 0;
2272 }
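
     /* Editor's note: RX checksum offload is deliberately kept off while
      * IFF_PROMISC is set (both here and in macb_set_rx_mode() above),
      * presumably so that a sniffer still sees frames whose checksums the
      * engine would otherwise discard.
      */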
2273 
2274 static const struct net_device_ops macb_netdev_ops = {
2275         .ndo_open               = macb_open,
2276         .ndo_stop               = macb_close,
2277         .ndo_start_xmit         = macb_start_xmit,
2278         .ndo_set_rx_mode        = macb_set_rx_mode,
2279         .ndo_get_stats          = macb_get_stats,
2280         .ndo_do_ioctl           = macb_ioctl,
2281         .ndo_validate_addr      = eth_validate_addr,
2282         .ndo_change_mtu         = macb_change_mtu,
2283         .ndo_set_mac_address    = eth_mac_addr,
2284 #ifdef CONFIG_NET_POLL_CONTROLLER
2285         .ndo_poll_controller    = macb_poll_controller,
2286 #endif
2287         .ndo_set_features       = macb_set_features,
2288 };
2289 
2290 /*
2291  * Configure peripheral capabilities according to device tree
2292  * and integration options used
2293  */
2294 static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
2295 {
2296         u32 dcfg;
2297 
2298         if (dt_conf)
2299                 bp->caps = dt_conf->caps;
2300 
2301         if (hw_is_gem(bp->regs, bp->native_io)) {
2302                 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2303 
2304                 dcfg = gem_readl(bp, DCFG1);
2305                 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2306                         bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2307                 dcfg = gem_readl(bp, DCFG2);
2308                 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2309                         bp->caps |= MACB_CAPS_FIFO_MODE;
2310         }
2311 
2312         dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2313 }
2314 
2315 static void macb_probe_queues(void __iomem *mem,
2316                               bool native_io,
2317                               unsigned int *queue_mask,
2318                               unsigned int *num_queues)
2319 {
2320         unsigned int hw_q;
2321 
2322         *queue_mask = 0x1;
2323         *num_queues = 1;
2324 
2325         /* Is it macb or gem?
2326          *
2327          * We need to read directly from the hardware here because
2328          * we are early in the probe process and don't have the
2329          * MACB_CAPS_MACB_IS_GEM flag set yet
2330          */
2331         if (!hw_is_gem(mem, native_io))
2332                 return;
2333 
2334         /* bit 0 is never set but queue 0 always exists */
2335         *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
2336 
2337         *queue_mask |= 0x1;
2338 
2339         for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2340                 if (*queue_mask & (1 << hw_q))
2341                         (*num_queues)++;
2342 }
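
     /* Editor's worked example: a GEM whose DCFG6 reports queues 1 and 2 in
      * addition to the always-present queue 0 reads back 0x06 in the low
      * byte; OR-ing in bit 0 yields queue_mask = 0x07 and the loop counts
      * num_queues = 3.  A plain MACB never reaches the read and keeps the
      * single-queue defaults.
      */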
2343 
2344 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
2345                          struct clk **hclk, struct clk **tx_clk)
2346 {
2347         int err;
2348 
2349         *pclk = devm_clk_get(&pdev->dev, "pclk");
2350         if (IS_ERR(*pclk)) {
2351                 err = PTR_ERR(*pclk);
2352                 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
2353                 return err;
2354         }
2355 
2356         *hclk = devm_clk_get(&pdev->dev, "hclk");
2357         if (IS_ERR(*hclk)) {
2358                 err = PTR_ERR(*hclk);
2359                 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
2360                 return err;
2361         }
2362 
2363         *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2364         if (IS_ERR(*tx_clk))
2365                 *tx_clk = NULL;
2366 
2367         err = clk_prepare_enable(*pclk);
2368         if (err) {
2369                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2370                 return err;
2371         }
2372 
2373         err = clk_prepare_enable(*hclk);
2374         if (err) {
2375                 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
2376                 goto err_disable_pclk;
2377         }
2378 
2379         err = clk_prepare_enable(*tx_clk);
2380         if (err) {
2381                 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2382                 goto err_disable_hclk;
2383         }
2384 
2385         return 0;
2386 
2387 err_disable_hclk:
2388         clk_disable_unprepare(*hclk);
2389 
2390 err_disable_pclk:
2391         clk_disable_unprepare(*pclk);
2392 
2393         return err;
2394 }
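
     /* Editor's note: tx_clk is optional -- a failed devm_clk_get() is
      * mapped to NULL above, and the clk API treats a NULL clock as a no-op
      * in clk_prepare_enable(), so the error path only needs to unwind the
      * clocks that were actually enabled, in reverse order.
      */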
2395 
2396 static int macb_init(struct platform_device *pdev)
2397 {
2398         struct net_device *dev = platform_get_drvdata(pdev);
2399         unsigned int hw_q, q;
2400         struct macb *bp = netdev_priv(dev);
2401         struct macb_queue *queue;
2402         int err;
2403         u32 val;
2404 
2405         /* Set the queue register mapping once and for all: queue0 has a
2406          * special register mapping, but we don't want to test the queue index
2407          * and then compute the corresponding register offset at run time.
2408          */
2409         for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
2410                 if (!(bp->queue_mask & (1 << hw_q)))
2411                         continue;
2412 
2413                 queue = &bp->queues[q];
2414                 queue->bp = bp;
2415                 if (hw_q) {
2416                         queue->ISR  = GEM_ISR(hw_q - 1);
2417                         queue->IER  = GEM_IER(hw_q - 1);
2418                         queue->IDR  = GEM_IDR(hw_q - 1);
2419                         queue->IMR  = GEM_IMR(hw_q - 1);
2420                         queue->TBQP = GEM_TBQP(hw_q - 1);
2421                 } else {
2422                         /* queue0 uses legacy registers */
2423                         queue->ISR  = MACB_ISR;
2424                         queue->IER  = MACB_IER;
2425                         queue->IDR  = MACB_IDR;
2426                         queue->IMR  = MACB_IMR;
2427                         queue->TBQP = MACB_TBQP;
2428                 }
2429 
2430                 /* Get the IRQ: here we use the linux queue index, not the
2431                  * hardware queue index. The queue IRQ definitions in the
2432                  * device tree must therefore skip the optional gaps that
2433                  * can exist in the hardware queue mask.
2434                  */
2435                 queue->irq = platform_get_irq(pdev, q);
2436                 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
2437                                        IRQF_SHARED, dev->name, queue);
2438                 if (err) {
2439                         dev_err(&pdev->dev,
2440                                 "Unable to request IRQ %d (error %d)\n",
2441                                 queue->irq, err);
2442                         return err;
2443                 }
2444 
2445                 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
2446                 q++;
2447         }
2448 
2449         dev->netdev_ops = &macb_netdev_ops;
2450         netif_napi_add(dev, &bp->napi, macb_poll, 64);
2451 
2452         /* set up the appropriate routines according to the adapter type */
2453         if (macb_is_gem(bp)) {
2454                 bp->max_tx_length = GEM_MAX_TX_LEN;
2455                 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2456                 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2457                 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2458                 bp->macbgem_ops.mog_rx = gem_rx;
2459                 dev->ethtool_ops = &gem_ethtool_ops;
2460         } else {
2461                 bp->max_tx_length = MACB_MAX_TX_LEN;
2462                 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2463                 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2464                 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2465                 bp->macbgem_ops.mog_rx = macb_rx;
2466                 dev->ethtool_ops = &macb_ethtool_ops;
2467         }
2468 
2469         /* Set features */
2470         dev->hw_features = NETIF_F_SG;
2471         /* Checksum offload is only available on gem with packet buffer */
2472         if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
2473                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2474         if (bp->caps & MACB_CAPS_SG_DISABLED)
2475                 dev->hw_features &= ~NETIF_F_SG;
2476         dev->features = dev->hw_features;
2477 
2478         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
2479                 val = 0;
2480                 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2481                         val = GEM_BIT(RGMII);
2482                 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
2483                          (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
2484                         val = MACB_BIT(RMII);
2485                 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
2486                         val = MACB_BIT(MII);
2487 
2488                 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
2489                         val |= MACB_BIT(CLKEN);
2490 
2491                 macb_or_gem_writel(bp, USRIO, val);
2492         }
2493 
2494         /* Set MII management clock divider */
2495         val = macb_mdc_clk_div(bp);
2496         val |= macb_dbw(bp);
2497         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2498                 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2499         macb_writel(bp, NCFGR, val);
2500 
2501         return 0;
2502 }
2503 
2504 #if defined(CONFIG_OF)
2505 /* 1518 rounded up */
2506 #define AT91ETHER_MAX_RBUFF_SZ  0x600
2507 /* max number of receive buffers */
2508 #define AT91ETHER_MAX_RX_DESCR  9
2509 
2510 /* Initialize and start the Receiver and Transmit subsystems */
2511 static int at91ether_start(struct net_device *dev)
2512 {
2513         struct macb *lp = netdev_priv(dev);
2514         dma_addr_t addr;
2515         u32 ctl;
2516         int i;
2517 
2518         lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2519                                          (AT91ETHER_MAX_RX_DESCR *
2520                                           sizeof(struct macb_dma_desc)),
2521                                          &lp->rx_ring_dma, GFP_KERNEL);
2522         if (!lp->rx_ring)
2523                 return -ENOMEM;
2524 
2525         lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
2526                                             AT91ETHER_MAX_RX_DESCR *
2527                                             AT91ETHER_MAX_RBUFF_SZ,
2528                                             &lp->rx_buffers_dma, GFP_KERNEL);
2529         if (!lp->rx_buffers) {
2530                 dma_free_coherent(&lp->pdev->dev,
2531                                   AT91ETHER_MAX_RX_DESCR *
2532                                   sizeof(struct macb_dma_desc),
2533                                   lp->rx_ring, lp->rx_ring_dma);
2534                 lp->rx_ring = NULL;
2535                 return -ENOMEM;
2536         }
2537 
2538         addr = lp->rx_buffers_dma;
2539         for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2540                 lp->rx_ring[i].addr = addr;
2541                 lp->rx_ring[i].ctrl = 0;
2542                 addr += AT91ETHER_MAX_RBUFF_SZ;
2543         }
2544 
2545         /* Set the Wrap bit on the last descriptor */
2546         lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
2547 
2548         /* Reset buffer index */
2549         lp->rx_tail = 0;
2550 
2551         /* Program address of descriptor list in Rx Buffer Queue register */
2552         macb_writel(lp, RBQP, lp->rx_ring_dma);
2553 
2554         /* Enable Receive and Transmit */
2555         ctl = macb_readl(lp, NCR);
2556         macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
2557 
2558         return 0;
2559 }
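
     /* Editor's note on the fixed layout above: the RM9200-style EMAC carves
      * one coherent region into AT91ETHER_MAX_RX_DESCR (9) slots of
      * AT91ETHER_MAX_RBUFF_SZ (0x600 = 1536) bytes, i.e. 13824 bytes of data
      * in total, and descriptor i simply points at
      * rx_buffers_dma + i * 0x600.  The wrap bit on the last descriptor
      * closes the ring, so at91ether_rx() only needs the explicit
      * wrap-around check instead of modulo arithmetic.
      */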
2560 
2561 /* Open the ethernet interface */
2562 static int at91ether_open(struct net_device *dev)
2563 {
2564         struct macb *lp = netdev_priv(dev);
2565         u32 ctl;
2566         int ret;
2567 
2568         /* Clear internal statistics */
2569         ctl = macb_readl(lp, NCR);
2570         macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2571 
2572         macb_set_hwaddr(lp);
2573 
2574         ret = at91ether_start(dev);
2575         if (ret)
2576                 return ret;
2577 
2578         /* Enable MAC interrupts */
2579         macb_writel(lp, IER, MACB_BIT(RCOMP)    |
2580                              MACB_BIT(RXUBR)    |
2581                              MACB_BIT(ISR_TUND) |
2582                              MACB_BIT(ISR_RLE)  |
2583                              MACB_BIT(TCOMP)    |
2584                              MACB_BIT(ISR_ROVR) |
2585                              MACB_BIT(HRESP));
2586 
2587         /* schedule a link state check */
2588         phy_start(lp->phy_dev);
2589 
2590         netif_start_queue(dev);
2591 
2592         return 0;
2593 }
2594 
2595 /* Close the interface */
2596 static int at91ether_close(struct net_device *dev)
2597 {
2598         struct macb *lp = netdev_priv(dev);
2599         u32 ctl;
2600 
2601         /* Disable Receiver and Transmitter */
2602         ctl = macb_readl(lp, NCR);
2603         macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
2604 
2605         /* Disable MAC interrupts */
2606         macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
2607                              MACB_BIT(RXUBR)    |
2608                              MACB_BIT(ISR_TUND) |
2609                              MACB_BIT(ISR_RLE)  |
2610                              MACB_BIT(TCOMP)    |
2611                              MACB_BIT(ISR_ROVR) |
2612                              MACB_BIT(HRESP));
2613 
2614         netif_stop_queue(dev);
2615 
2616         dma_free_coherent(&lp->pdev->dev,
2617                           AT91ETHER_MAX_RX_DESCR *
2618                           sizeof(struct macb_dma_desc),
2619                           lp->rx_ring, lp->rx_ring_dma);
2620         lp->rx_ring = NULL;
2621 
2622         dma_free_coherent(&lp->pdev->dev,
2623                           AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
2624                           lp->rx_buffers, lp->rx_buffers_dma);
2625         lp->rx_buffers = NULL;
2626 
2627         return 0;
2628 }
2629 
2630 /* Transmit packet */
2631 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2632 {
2633         struct macb *lp = netdev_priv(dev);
2634 
2635         if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
2636                 netif_stop_queue(dev);
2637 
2638                 /* Store packet information (to free when Tx completed) */
2639                 lp->skb = skb;
2640                 lp->skb_length = skb->len;
2641                 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
2642                                                   skb->len, DMA_TO_DEVICE);
2643 
2644                 /* Set address of the data in the Transmit Address register */
2645                 macb_writel(lp, TAR, lp->skb_physaddr);
2646                 /* Set length of the packet in the Transmit Control register */
2647                 macb_writel(lp, TCR, skb->len);
2648 
2649         } else {
2650                 netdev_err(dev, "%s called, but device is busy!\n", __func__);
2651                 return NETDEV_TX_BUSY;
2652         }
2653 
2654         return NETDEV_TX_OK;
2655 }
2656 
2657 /* Extract received frame from buffer descriptors and send it to the
2658  * upper layers. (Called from interrupt context)
2659  */
2660 static void at91ether_rx(struct net_device *dev)
2661 {
2662         struct macb *lp = netdev_priv(dev);
2663         unsigned char *p_recv;
2664         struct sk_buff *skb;
2665         unsigned int pktlen;
2666 
2667         while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
2668                 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2669                 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
2670                 skb = netdev_alloc_skb(dev, pktlen + 2);
2671                 if (skb) {
2672                         skb_reserve(skb, 2);
2673                         memcpy(skb_put(skb, pktlen), p_recv, pktlen);
2674 
2675                         skb->protocol = eth_type_trans(skb, dev);
2676                         lp->stats.rx_packets++;
2677                         lp->stats.rx_bytes += pktlen;
2678                         netif_rx(skb);
2679                 } else {
2680                         lp->stats.rx_dropped++;
2681                 }
2682 
2683                 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
2684                         lp->stats.multicast++;
2685 
2686                 /* reset ownership bit */
2687                 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
2688 
2689                 /* wrap after last buffer */
2690                 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2691                         lp->rx_tail = 0;
2692                 else
2693                         lp->rx_tail++;
2694         }
2695 }
2696 
2697 /* MAC interrupt handler */
2698 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
2699 {
2700         struct net_device *dev = dev_id;
2701         struct macb *lp = netdev_priv(dev);
2702         u32 intstatus, ctl;
2703 
2704         /* MAC Interrupt Status register indicates what interrupts are pending.
2705          * It is automatically cleared once read.
2706          */
2707         intstatus = macb_readl(lp, ISR);
2708 
2709         /* Receive complete */
2710         if (intstatus & MACB_BIT(RCOMP))
2711                 at91ether_rx(dev);
2712 
2713         /* Transmit complete */
2714         if (intstatus & MACB_BIT(TCOMP)) {
2715                 /* The TCOM bit is set even if the transmission failed */
2716                 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
2717                         lp->stats.tx_errors++;
2718 
2719                 if (lp->skb) {
2720                         dev_kfree_skb_irq(lp->skb);
2721                         lp->skb = NULL;
2722                         dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
2723                                          lp->skb_length, DMA_TO_DEVICE);
2724                         lp->stats.tx_packets++;
2725                         lp->stats.tx_bytes += lp->skb_length;
2726                 }
2727                 netif_wake_queue(dev);
2728         }
2729 
2730         /* Work-around for EMAC Errata section 41.3.1 */
2731         if (intstatus & MACB_BIT(RXUBR)) {
2732                 ctl = macb_readl(lp, NCR);
2733                 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
2734                 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
2735         }
2736 
2737         if (intstatus & MACB_BIT(ISR_ROVR))
2738                 netdev_err(dev, "ROVR error\n");
2739 
2740         return IRQ_HANDLED;
2741 }
2742 
2743 #ifdef CONFIG_NET_POLL_CONTROLLER
2744 static void at91ether_poll_controller(struct net_device *dev)
2745 {
2746         unsigned long flags;
2747 
2748         local_irq_save(flags);
2749         at91ether_interrupt(dev->irq, dev);
2750         local_irq_restore(flags);
2751 }
2752 #endif
2753 
2754 static const struct net_device_ops at91ether_netdev_ops = {
2755         .ndo_open               = at91ether_open,
2756         .ndo_stop               = at91ether_close,
2757         .ndo_start_xmit         = at91ether_start_xmit,
2758         .ndo_get_stats          = macb_get_stats,
2759         .ndo_set_rx_mode        = macb_set_rx_mode,
2760         .ndo_set_mac_address    = eth_mac_addr,
2761         .ndo_do_ioctl           = macb_ioctl,
2762         .ndo_validate_addr      = eth_validate_addr,
2763         .ndo_change_mtu         = eth_change_mtu,
2764 #ifdef CONFIG_NET_POLL_CONTROLLER
2765         .ndo_poll_controller    = at91ether_poll_controller,
2766 #endif
2767 };
2768 
2769 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
2770                               struct clk **hclk, struct clk **tx_clk)
2771 {
2772         int err;
2773 
2774         *hclk = NULL;
2775         *tx_clk = NULL;
2776 
2777         *pclk = devm_clk_get(&pdev->dev, "ether_clk");
2778         if (IS_ERR(*pclk))
2779                 return PTR_ERR(*pclk);
2780 
2781         err = clk_prepare_enable(*pclk);
2782         if (err) {
2783                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2784                 return err;
2785         }
2786 
2787         return 0;
2788 }
2789 
2790 static int at91ether_init(struct platform_device *pdev)
2791 {
2792         struct net_device *dev = platform_get_drvdata(pdev);
2793         struct macb *bp = netdev_priv(dev);
2794         int err;
2795         u32 reg;
2796 
2797         dev->netdev_ops = &at91ether_netdev_ops;
2798         dev->ethtool_ops = &macb_ethtool_ops;
2799 
2800         err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
2801                                0, dev->name, dev);
2802         if (err)
2803                 return err;
2804 
2805         macb_writel(bp, NCR, 0);
2806 
2807         reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
2808         if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
2809                 reg |= MACB_BIT(RM9200_RMII);
2810 
2811         macb_writel(bp, NCFGR, reg);
2812 
2813         return 0;
2814 }
2815 
2816 static const struct macb_config at91sam9260_config = {
2817         .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2818         .clk_init = macb_clk_init,
2819         .init = macb_init,
2820 };
2821 
2822 static const struct macb_config pc302gem_config = {
2823         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2824         .dma_burst_length = 16,
2825         .clk_init = macb_clk_init,
2826         .init = macb_init,
2827 };
2828 
2829 static const struct macb_config sama5d2_config = {
2830         .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2831         .dma_burst_length = 16,
2832         .clk_init = macb_clk_init,
2833         .init = macb_init,
2834 };
2835 
2836 static const struct macb_config sama5d3_config = {
2837         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
2838               | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2839         .dma_burst_length = 16,
2840         .clk_init = macb_clk_init,
2841         .init = macb_init,
2842 };
2843 
2844 static const struct macb_config sama5d4_config = {
2845         .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2846         .dma_burst_length = 4,
2847         .clk_init = macb_clk_init,
2848         .init = macb_init,
2849 };
2850 
2851 static const struct macb_config emac_config = {
2852         .clk_init = at91ether_clk_init,
2853         .init = at91ether_init,
2854 };
2855 
2856 static const struct macb_config np4_config = {
2857         .caps = MACB_CAPS_USRIO_DISABLED,
2858         .clk_init = macb_clk_init,
2859         .init = macb_init,
2860 };
2861 
2862 static const struct macb_config zynqmp_config = {
2863         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
2864         .dma_burst_length = 16,
2865         .clk_init = macb_clk_init,
2866         .init = macb_init,
2867         .jumbo_max_len = 10240,
2868 };
2869 
2870 static const struct macb_config zynq_config = {
2871         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
2872         .dma_burst_length = 16,
2873         .clk_init = macb_clk_init,
2874         .init = macb_init,
2875 };
2876 
2877 static const struct of_device_id macb_dt_ids[] = {
2878         { .compatible = "cdns,at32ap7000-macb" },
2879         { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
2880         { .compatible = "cdns,macb" },
2881         { .compatible = "cdns,np4-macb", .data = &np4_config },
2882         { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
2883         { .compatible = "cdns,gem", .data = &pc302gem_config },
2884         { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
2885         { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
2886         { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
2887         { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
2888         { .compatible = "cdns,emac", .data = &emac_config },
2889         { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
2890         { .compatible = "cdns,zynq-gem", .data = &zynq_config },
2891         { /* sentinel */ }
2892 };
2893 MODULE_DEVICE_TABLE(of, macb_dt_ids);
2894 #endif /* CONFIG_OF */

static int macb_probe(struct platform_device *pdev)
{
        int (*clk_init)(struct platform_device *, struct clk **,
                        struct clk **, struct clk **)
                                              = macb_clk_init;
        int (*init)(struct platform_device *) = macb_init;
        struct device_node *np = pdev->dev.of_node;
        struct device_node *phy_node;
        const struct macb_config *macb_config = NULL;
        struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
        unsigned int queue_mask, num_queues;
        struct macb_platform_data *pdata;
        bool native_io;
        struct phy_device *phydev;
        struct net_device *dev;
        struct resource *regs;
        void __iomem *mem;
        const char *mac;
        struct macb *bp;
        int err;

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mem = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(mem))
                return PTR_ERR(mem);

        if (np) {
                const struct of_device_id *match;

                match = of_match_node(macb_dt_ids, np);
                if (match && match->data) {
                        macb_config = match->data;
                        clk_init = macb_config->clk_init;
                        init = macb_config->init;
                }
        }

        err = clk_init(pdev, &pclk, &hclk, &tx_clk);
        if (err)
                return err;

        native_io = hw_is_native_io(mem);

        macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
        dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
        if (!dev) {
                err = -ENOMEM;
                goto err_disable_clocks;
        }

        dev->base_addr = regs->start;

        SET_NETDEV_DEV(dev, &pdev->dev);

        bp = netdev_priv(dev);
        bp->pdev = pdev;
        bp->dev = dev;
        bp->regs = mem;
        bp->native_io = native_io;
        if (native_io) {
                bp->macb_reg_readl = hw_readl_native;
                bp->macb_reg_writel = hw_writel_native;
        } else {
                bp->macb_reg_readl = hw_readl;
                bp->macb_reg_writel = hw_writel;
        }
        bp->num_queues = num_queues;
        bp->queue_mask = queue_mask;
        if (macb_config)
                bp->dma_burst_length = macb_config->dma_burst_length;
        bp->pclk = pclk;
        bp->hclk = hclk;
        bp->tx_clk = tx_clk;
        if (macb_config)
                bp->jumbo_max_len = macb_config->jumbo_max_len;

        bp->wol = 0;
        if (of_get_property(np, "magic-packet", NULL))
                bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
        device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

        spin_lock_init(&bp->lock);

        /* setup capabilities */
        macb_configure_caps(bp, macb_config);

        platform_set_drvdata(pdev, dev);

        dev->irq = platform_get_irq(pdev, 0);
        if (dev->irq < 0) {
                err = dev->irq;
                goto err_disable_clocks;
        }

        mac = of_get_mac_address(np);
        if (mac)
                memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
        else
                macb_get_hwaddr(bp);

        /* Power up the PHY if there is a GPIO reset */
        phy_node = of_get_next_available_child(np, NULL);
        if (phy_node) {
                int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);

                if (gpio_is_valid(gpio)) {
                        bp->reset_gpio = gpio_to_desc(gpio);
                        gpiod_direction_output(bp->reset_gpio, 1);
                }
        }
        of_node_put(phy_node);

        err = of_get_phy_mode(np);
        if (err < 0) {
                pdata = dev_get_platdata(&pdev->dev);
                if (pdata && pdata->is_rmii)
                        bp->phy_interface = PHY_INTERFACE_MODE_RMII;
                else
                        bp->phy_interface = PHY_INTERFACE_MODE_MII;
        } else {
                bp->phy_interface = err;
        }

        /* IP specific init */
        err = init(pdev);
        if (err)
                goto err_out_free_netdev;

        err = macb_mii_init(bp);
        if (err)
                goto err_out_free_netdev;

        phydev = bp->phy_dev;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
                goto err_out_unregister_mdio;
        }

        phy_attached_info(phydev);

        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
                    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
                    dev->base_addr, dev->irq, dev->dev_addr);

        return 0;

err_out_unregister_mdio:
        phy_disconnect(bp->phy_dev);
        mdiobus_unregister(bp->mii_bus);
        mdiobus_free(bp->mii_bus);

        /* Shut down the PHY if there is a GPIO reset */
        if (bp->reset_gpio)
                gpiod_set_value(bp->reset_gpio, 0);

err_out_free_netdev:
        free_netdev(dev);

err_disable_clocks:
        clk_disable_unprepare(tx_clk);
        clk_disable_unprepare(hclk);
        clk_disable_unprepare(pclk);

        return err;
}
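
/*
 * Note the unwind order in the error paths above: it mirrors acquisition.
 * The clocks are the first resources taken (clk_init) and the last ones
 * released, and the netdev is freed before them.  The MMIO mapping needs
 * no explicit unwind because devm_ioremap_resource() ties its lifetime to
 * the device, and clk_disable_unprepare() is a no-op on a NULL clk such
 * as a tx_clk that a variant clk_init never filled in.
 */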

static int macb_remove(struct platform_device *pdev)
{
        struct net_device *dev;
        struct macb *bp;

        dev = platform_get_drvdata(pdev);

        if (dev) {
                bp = netdev_priv(dev);
                if (bp->phy_dev)
                        phy_disconnect(bp->phy_dev);
                mdiobus_unregister(bp->mii_bus);
                mdiobus_free(bp->mii_bus);

                /* Shut down the PHY if there is a GPIO reset */
                if (bp->reset_gpio)
                        gpiod_set_value(bp->reset_gpio, 0);

                unregister_netdev(dev);
                clk_disable_unprepare(bp->tx_clk);
                clk_disable_unprepare(bp->hclk);
                clk_disable_unprepare(bp->pclk);
                free_netdev(dev);
        }

        return 0;
}
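
/*
 * Teardown in macb_remove() runs in roughly the reverse order of probe:
 * disconnect the PHY and tear down the MDIO bus, drive the optional PHY
 * reset GPIO low, unregister the netdev, gate the clocks, and finally free
 * the netdev.  The if (dev) test is purely defensive, since drvdata is set
 * by every successful probe.
 */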

static int __maybe_unused macb_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct macb *bp = netdev_priv(netdev);

        netif_carrier_off(netdev);
        netif_device_detach(netdev);

        if (bp->wol & MACB_WOL_ENABLED) {
                macb_writel(bp, IER, MACB_BIT(WOL));
                macb_writel(bp, WOL, MACB_BIT(MAG));
                enable_irq_wake(bp->queues[0].irq);
        } else {
                clk_disable_unprepare(bp->tx_clk);
                clk_disable_unprepare(bp->hclk);
                clk_disable_unprepare(bp->pclk);
        }

        return 0;
}
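
/*
 * With wake-on-LAN armed, suspend leaves the clocks running so the MAC can
 * keep receiving, unmasks the WOL interrupt and programs magic-packet
 * detection before flagging the queue 0 IRQ as a wakeup source; otherwise
 * it simply gates the clocks.  MACB_WOL_ENABLED is expected to be set
 * through the driver's ethtool WoL handler, which lies outside this
 * excerpt.
 */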

static int __maybe_unused macb_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct macb *bp = netdev_priv(netdev);

        if (bp->wol & MACB_WOL_ENABLED) {
                macb_writel(bp, IDR, MACB_BIT(WOL));
                macb_writel(bp, WOL, 0);
                disable_irq_wake(bp->queues[0].irq);
        } else {
                clk_prepare_enable(bp->pclk);
                clk_prepare_enable(bp->hclk);
                clk_prepare_enable(bp->tx_clk);
        }

        netif_device_attach(netdev);

        return 0;
}
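
/*
 * Resume is the mirror image of suspend: either disarm wake-on-LAN (mask
 * the WOL interrupt, clear the WOL register, drop the wakeup flag on the
 * queue 0 IRQ) or re-enable the clocks in the same pclk -> hclk -> tx_clk
 * order in which clk_init brought them up at probe time.
 */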

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
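
/*
 * SIMPLE_DEV_PM_OPS() fills in only the system-sleep suspend/resume
 * callbacks of a struct dev_pm_ops.  When CONFIG_PM_SLEEP is disabled
 * those slots stay empty, and the __maybe_unused annotations above keep
 * the compiler from warning about the then-unreferenced handlers.
 */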

static struct platform_driver macb_driver = {
        .probe          = macb_probe,
        .remove         = macb_remove,
        .driver         = {
                .name           = "macb",
                .of_match_table = of_match_ptr(macb_dt_ids),
                .pm             = &macb_pm_ops,
        },
};

module_platform_driver(macb_driver);
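
/*
 * module_platform_driver() expands to the module_init()/module_exit()
 * boilerplate that registers and unregisters macb_driver.  of_match_ptr()
 * evaluates to NULL when CONFIG_OF is disabled, which is why the match
 * table above can be compiled out under the matching #ifdef.
 */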

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");