
Linux/drivers/net/ethernet/cadence/macb.c

  1 /*
  2  * Cadence MACB/GEM Ethernet Controller driver
  3  *
  4  * Copyright (C) 2004-2006 Atmel Corporation
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License version 2 as
  8  * published by the Free Software Foundation.
  9  */
 10 
 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12 #include <linux/clk.h>
 13 #include <linux/module.h>
 14 #include <linux/moduleparam.h>
 15 #include <linux/kernel.h>
 16 #include <linux/types.h>
 17 #include <linux/circ_buf.h>
 18 #include <linux/slab.h>
 19 #include <linux/init.h>
 20 #include <linux/io.h>
 21 #include <linux/gpio.h>
 22 #include <linux/gpio/consumer.h>
 23 #include <linux/interrupt.h>
 24 #include <linux/netdevice.h>
 25 #include <linux/etherdevice.h>
 26 #include <linux/dma-mapping.h>
 27 #include <linux/platform_data/macb.h>
 28 #include <linux/platform_device.h>
 29 #include <linux/phy.h>
 30 #include <linux/of.h>
 31 #include <linux/of_device.h>
 32 #include <linux/of_gpio.h>
 33 #include <linux/of_mdio.h>
 34 #include <linux/of_net.h>
 35 
 36 #include "macb.h"
 37 
 38 #define MACB_RX_BUFFER_SIZE     128
 39 #define RX_BUFFER_MULTIPLE      64  /* bytes */
 40 #define RX_RING_SIZE            512 /* must be power of 2 */
 41 #define RX_RING_BYTES           (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 42 
 43 #define TX_RING_SIZE            128 /* must be power of 2 */
 44 #define TX_RING_BYTES           (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 45 
  46 /* Level of occupied TX descriptors at or below which we wake up the TX process */
 47 #define MACB_TX_WAKEUP_THRESH   (3 * TX_RING_SIZE / 4)
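/* (With TX_RING_SIZE == 128 this evaluates to 96 descriptors; the actual
 * wakeup test is the CIRC_CNT check in macb_tx_interrupt().)
 */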
 48 
 49 #define MACB_RX_INT_FLAGS       (MACB_BIT(RCOMP) | MACB_BIT(RXUBR)      \
 50                                  | MACB_BIT(ISR_ROVR))
 51 #define MACB_TX_ERR_FLAGS       (MACB_BIT(ISR_TUND)                     \
 52                                         | MACB_BIT(ISR_RLE)             \
 53                                         | MACB_BIT(TXERR))
 54 #define MACB_TX_INT_FLAGS       (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 55 
 56 #define MACB_MAX_TX_LEN         ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 57 #define GEM_MAX_TX_LEN          ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 58 
 59 #define GEM_MTU_MIN_SIZE        68
 60 
 61 /*
 62  * Graceful stop timeouts in us. We should allow up to
 63  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 64  */
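/* A maximum-length (1518 byte) frame at 10 Mbit/s takes roughly
 * 1518 * 8 / 10 ~= 1214 us on the wire, hence the 1230 us budget below.
 */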
 65 #define MACB_HALT_TIMEOUT       1230
 66 
 67 /* Ring buffer accessors */
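/* The ring sizes are powers of two, so the wrap helpers below can reduce a
 * free-running index with a cheap "index & (size - 1)" mask instead of a
 * modulo; head/tail counters are only wrapped when a descriptor, skb slot
 * or DMA address is actually looked up.
 */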
 68 static unsigned int macb_tx_ring_wrap(unsigned int index)
 69 {
 70         return index & (TX_RING_SIZE - 1);
 71 }
 72 
 73 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 74                                           unsigned int index)
 75 {
 76         return &queue->tx_ring[macb_tx_ring_wrap(index)];
 77 }
 78 
 79 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
 80                                        unsigned int index)
 81 {
 82         return &queue->tx_skb[macb_tx_ring_wrap(index)];
 83 }
 84 
 85 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 86 {
 87         dma_addr_t offset;
 88 
 89         offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
 90 
 91         return queue->tx_ring_dma + offset;
 92 }
 93 
 94 static unsigned int macb_rx_ring_wrap(unsigned int index)
 95 {
 96         return index & (RX_RING_SIZE - 1);
 97 }
 98 
 99 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
100 {
101         return &bp->rx_ring[macb_rx_ring_wrap(index)];
102 }
103 
104 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
105 {
106         return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
107 }
108 
109 /* I/O accessors */
110 static u32 hw_readl_native(struct macb *bp, int offset)
111 {
112         return __raw_readl(bp->regs + offset);
113 }
114 
115 static void hw_writel_native(struct macb *bp, int offset, u32 value)
116 {
117         __raw_writel(value, bp->regs + offset);
118 }
119 
120 static u32 hw_readl(struct macb *bp, int offset)
121 {
122         return readl_relaxed(bp->regs + offset);
123 }
124 
125 static void hw_writel(struct macb *bp, int offset, u32 value)
126 {
127         writel_relaxed(value, bp->regs + offset);
128 }
129 
130 /*
 131  * Find the CPU endianness by using the loopback bit of the NCR register.
 132  * When the CPU is big endian we need to program swapped mode for management
 133  * descriptor access.
134  */
135 static bool hw_is_native_io(void __iomem *addr)
136 {
137         u32 value = MACB_BIT(LLB);
138 
139         __raw_writel(value, addr + MACB_NCR);
140         value = __raw_readl(addr + MACB_NCR);
141 
142         /* Write 0 back to disable everything */
143         __raw_writel(0, addr + MACB_NCR);
144 
145         return value == MACB_BIT(LLB);
146 }
147 
148 static bool hw_is_gem(void __iomem *addr, bool native_io)
149 {
150         u32 id;
151 
152         if (native_io)
153                 id = __raw_readl(addr + MACB_MID);
154         else
155                 id = readl_relaxed(addr + MACB_MID);
156 
157         return MACB_BFEXT(IDNUM, id) >= 0x2;
158 }
159 
160 static void macb_set_hwaddr(struct macb *bp)
161 {
162         u32 bottom;
163         u16 top;
164 
165         bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
166         macb_or_gem_writel(bp, SA1B, bottom);
167         top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
168         macb_or_gem_writel(bp, SA1T, top);
169 
170         /* Clear unused address register sets */
171         macb_or_gem_writel(bp, SA2B, 0);
172         macb_or_gem_writel(bp, SA2T, 0);
173         macb_or_gem_writel(bp, SA3B, 0);
174         macb_or_gem_writel(bp, SA3T, 0);
175         macb_or_gem_writel(bp, SA4B, 0);
176         macb_or_gem_writel(bp, SA4T, 0);
177 }
178 
179 static void macb_get_hwaddr(struct macb *bp)
180 {
181         struct macb_platform_data *pdata;
182         u32 bottom;
183         u16 top;
184         u8 addr[6];
185         int i;
186 
187         pdata = dev_get_platdata(&bp->pdev->dev);
188 
 189         /* Check all 4 address registers for a valid address */
190         for (i = 0; i < 4; i++) {
191                 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
192                 top = macb_or_gem_readl(bp, SA1T + i * 8);
193 
194                 if (pdata && pdata->rev_eth_addr) {
195                         addr[5] = bottom & 0xff;
196                         addr[4] = (bottom >> 8) & 0xff;
197                         addr[3] = (bottom >> 16) & 0xff;
198                         addr[2] = (bottom >> 24) & 0xff;
199                         addr[1] = top & 0xff;
200                         addr[0] = (top & 0xff00) >> 8;
201                 } else {
202                         addr[0] = bottom & 0xff;
203                         addr[1] = (bottom >> 8) & 0xff;
204                         addr[2] = (bottom >> 16) & 0xff;
205                         addr[3] = (bottom >> 24) & 0xff;
206                         addr[4] = top & 0xff;
207                         addr[5] = (top >> 8) & 0xff;
208                 }
209 
210                 if (is_valid_ether_addr(addr)) {
211                         memcpy(bp->dev->dev_addr, addr, sizeof(addr));
212                         return;
213                 }
214         }
215 
216         dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
217         eth_hw_addr_random(bp->dev);
218 }
219 
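/* The MAN register writes below assemble a standard IEEE 802.3 clause 22
 * MDIO management frame (start-of-frame, read/write opcode, PHY address,
 * register address, turnaround code and, for writes, 16 bits of data);
 * the IDLE bit in NSR then signals completion of the transfer.
 */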
220 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
221 {
222         struct macb *bp = bus->priv;
223         int value;
224 
225         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
226                               | MACB_BF(RW, MACB_MAN_READ)
227                               | MACB_BF(PHYA, mii_id)
228                               | MACB_BF(REGA, regnum)
229                               | MACB_BF(CODE, MACB_MAN_CODE)));
230 
231         /* wait for end of transfer */
232         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
233                 cpu_relax();
234 
235         value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
236 
237         return value;
238 }
239 
240 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
241                            u16 value)
242 {
243         struct macb *bp = bus->priv;
244 
245         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
246                               | MACB_BF(RW, MACB_MAN_WRITE)
247                               | MACB_BF(PHYA, mii_id)
248                               | MACB_BF(REGA, regnum)
249                               | MACB_BF(CODE, MACB_MAN_CODE)
250                               | MACB_BF(DATA, value)));
251 
252         /* wait for end of transfer */
253         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
254                 cpu_relax();
255 
256         return 0;
257 }
258 
259 /**
 260  * macb_set_tx_clk() - Set the TX clock to match the new link speed
 261  * @clk:        Pointer to the clock to change
 262  * @speed:      New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 263  * @dev:        Pointer to the struct net_device
264  */
265 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
266 {
267         long ferr, rate, rate_rounded;
268 
269         if (!clk)
270                 return;
271 
272         switch (speed) {
273         case SPEED_10:
274                 rate = 2500000;
275                 break;
276         case SPEED_100:
277                 rate = 25000000;
278                 break;
279         case SPEED_1000:
280                 rate = 125000000;
281                 break;
282         default:
283                 return;
284         }
285 
286         rate_rounded = clk_round_rate(clk, rate);
287         if (rate_rounded < 0)
288                 return;
289 
290         /* RGMII allows 50 ppm frequency error. Test and warn if this limit
291          * is not satisfied.
292          */
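        /* (ferr ends up expressed in units of 10 ppm, i.e. roughly
         * ferr * 100000 / rate, so the check against 5 below corresponds
         * to the 50 ppm limit.)
         */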
293         ferr = abs(rate_rounded - rate);
294         ferr = DIV_ROUND_UP(ferr, rate / 100000);
295         if (ferr > 5)
296                 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
297                                 rate);
298 
299         if (clk_set_rate(clk, rate_rounded))
300                 netdev_err(dev, "adjusting tx_clk failed.\n");
301 }
302 
303 static void macb_handle_link_change(struct net_device *dev)
304 {
305         struct macb *bp = netdev_priv(dev);
306         struct phy_device *phydev = bp->phy_dev;
307         unsigned long flags;
308         int status_change = 0;
309 
310         spin_lock_irqsave(&bp->lock, flags);
311 
312         if (phydev->link) {
313                 if ((bp->speed != phydev->speed) ||
314                     (bp->duplex != phydev->duplex)) {
315                         u32 reg;
316 
317                         reg = macb_readl(bp, NCFGR);
318                         reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
319                         if (macb_is_gem(bp))
320                                 reg &= ~GEM_BIT(GBE);
321 
322                         if (phydev->duplex)
323                                 reg |= MACB_BIT(FD);
324                         if (phydev->speed == SPEED_100)
325                                 reg |= MACB_BIT(SPD);
326                         if (phydev->speed == SPEED_1000 &&
327                             bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
328                                 reg |= GEM_BIT(GBE);
329 
330                         macb_or_gem_writel(bp, NCFGR, reg);
331 
332                         bp->speed = phydev->speed;
333                         bp->duplex = phydev->duplex;
334                         status_change = 1;
335                 }
336         }
337 
338         if (phydev->link != bp->link) {
339                 if (!phydev->link) {
340                         bp->speed = 0;
341                         bp->duplex = -1;
342                 }
343                 bp->link = phydev->link;
344 
345                 status_change = 1;
346         }
347 
348         spin_unlock_irqrestore(&bp->lock, flags);
349 
350         if (status_change) {
351                 if (phydev->link) {
352                         /* Update the TX clock rate if and only if the link is
353                          * up and there has been a link change.
354                          */
355                         macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
356 
357                         netif_carrier_on(dev);
358                         netdev_info(dev, "link up (%d/%s)\n",
359                                     phydev->speed,
360                                     phydev->duplex == DUPLEX_FULL ?
361                                     "Full" : "Half");
362                 } else {
363                         netif_carrier_off(dev);
364                         netdev_info(dev, "link down\n");
365                 }
366         }
367 }
368 
 369 /* based on au1000_eth.c */
370 static int macb_mii_probe(struct net_device *dev)
371 {
372         struct macb *bp = netdev_priv(dev);
373         struct macb_platform_data *pdata;
374         struct phy_device *phydev;
375         int phy_irq;
376         int ret;
377 
378         phydev = phy_find_first(bp->mii_bus);
379         if (!phydev) {
380                 netdev_err(dev, "no PHY found\n");
381                 return -ENXIO;
382         }
383 
384         pdata = dev_get_platdata(&bp->pdev->dev);
385         if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
386                 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
387                 if (!ret) {
388                         phy_irq = gpio_to_irq(pdata->phy_irq_pin);
389                         phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
390                 }
391         }
392 
393         /* attach the mac to the phy */
394         ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
395                                  bp->phy_interface);
396         if (ret) {
397                 netdev_err(dev, "Could not attach to PHY\n");
398                 return ret;
399         }
400 
401         /* mask with MAC supported features */
402         if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
403                 phydev->supported &= PHY_GBIT_FEATURES;
404         else
405                 phydev->supported &= PHY_BASIC_FEATURES;
406 
407         if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
408                 phydev->supported &= ~SUPPORTED_1000baseT_Half;
409 
410         phydev->advertising = phydev->supported;
411 
412         bp->link = 0;
413         bp->speed = 0;
414         bp->duplex = -1;
415         bp->phy_dev = phydev;
416 
417         return 0;
418 }
419 
420 static int macb_mii_init(struct macb *bp)
421 {
422         struct macb_platform_data *pdata;
423         struct device_node *np;
424         int err = -ENXIO, i;
425 
426         /* Enable management port */
427         macb_writel(bp, NCR, MACB_BIT(MPE));
428 
429         bp->mii_bus = mdiobus_alloc();
430         if (bp->mii_bus == NULL) {
431                 err = -ENOMEM;
432                 goto err_out;
433         }
434 
435         bp->mii_bus->name = "MACB_mii_bus";
436         bp->mii_bus->read = &macb_mdio_read;
437         bp->mii_bus->write = &macb_mdio_write;
438         snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
439                 bp->pdev->name, bp->pdev->id);
440         bp->mii_bus->priv = bp;
441         bp->mii_bus->parent = &bp->dev->dev;
442         pdata = dev_get_platdata(&bp->pdev->dev);
443 
444         dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
445 
446         np = bp->pdev->dev.of_node;
447         if (np) {
448                 /* try dt phy registration */
449                 err = of_mdiobus_register(bp->mii_bus, np);
450 
 451                 /* fall back to standard PHY registration if no PHY was
 452                    found during DT PHY registration */
453                 if (!err && !phy_find_first(bp->mii_bus)) {
454                         for (i = 0; i < PHY_MAX_ADDR; i++) {
455                                 struct phy_device *phydev;
456 
457                                 phydev = mdiobus_scan(bp->mii_bus, i);
458                                 if (IS_ERR(phydev)) {
459                                         err = PTR_ERR(phydev);
460                                         break;
461                                 }
462                         }
463 
464                         if (err)
465                                 goto err_out_unregister_bus;
466                 }
467         } else {
468                 if (pdata)
469                         bp->mii_bus->phy_mask = pdata->phy_mask;
470 
471                 err = mdiobus_register(bp->mii_bus);
472         }
473 
474         if (err)
475                 goto err_out_free_mdiobus;
476 
477         err = macb_mii_probe(bp->dev);
478         if (err)
479                 goto err_out_unregister_bus;
480 
481         return 0;
482 
483 err_out_unregister_bus:
484         mdiobus_unregister(bp->mii_bus);
485 err_out_free_mdiobus:
486         mdiobus_free(bp->mii_bus);
487 err_out:
488         return err;
489 }
490 
491 static void macb_update_stats(struct macb *bp)
492 {
493         u32 *p = &bp->hw_stats.macb.rx_pause_frames;
494         u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
495         int offset = MACB_PFR;
496 
497         WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
498 
 499         for (; p < end; p++, offset += 4)
500                 *p += bp->macb_reg_readl(bp, offset);
501 }
502 
503 static int macb_halt_tx(struct macb *bp)
504 {
505         unsigned long   halt_time, timeout;
506         u32             status;
507 
508         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
509 
510         timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
511         do {
512                 halt_time = jiffies;
513                 status = macb_readl(bp, TSR);
514                 if (!(status & MACB_BIT(TGO)))
515                         return 0;
516 
517                 usleep_range(10, 250);
518         } while (time_before(halt_time, timeout));
519 
520         return -ETIMEDOUT;
521 }
522 
523 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
524 {
525         if (tx_skb->mapping) {
526                 if (tx_skb->mapped_as_page)
527                         dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
528                                        tx_skb->size, DMA_TO_DEVICE);
529                 else
530                         dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
531                                          tx_skb->size, DMA_TO_DEVICE);
532                 tx_skb->mapping = 0;
533         }
534 
535         if (tx_skb->skb) {
536                 dev_kfree_skb_any(tx_skb->skb);
537                 tx_skb->skb = NULL;
538         }
539 }
540 
541 static void macb_tx_error_task(struct work_struct *work)
542 {
543         struct macb_queue       *queue = container_of(work, struct macb_queue,
544                                                       tx_error_task);
545         struct macb             *bp = queue->bp;
546         struct macb_tx_skb      *tx_skb;
547         struct macb_dma_desc    *desc;
548         struct sk_buff          *skb;
549         unsigned int            tail;
550         unsigned long           flags;
551 
552         netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
553                     (unsigned int)(queue - bp->queues),
554                     queue->tx_tail, queue->tx_head);
555 
556         /* Prevent the queue IRQ handlers from running: each of them may call
557          * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
558          * As explained below, we have to halt the transmission before updating
 559          * TBQP registers, so we call netif_tx_stop_all_queues() to notify the
 560          * networking stack that the MACB/GEM is halted.
561          */
562         spin_lock_irqsave(&bp->lock, flags);
563 
564         /* Make sure nobody is trying to queue up new packets */
565         netif_tx_stop_all_queues(bp->dev);
566 
567         /*
568          * Stop transmission now
 569          * (in case we have just queued new packets):
 570          * the MACB/GEM must be halted before the TBQP register can be written
571          */
572         if (macb_halt_tx(bp))
 573                 /* Just complain for now; reinitializing the TX path might help */
574                 netdev_err(bp->dev, "BUG: halt tx timed out\n");
575 
576         /*
 577          * Process the frames in the TX queue, including the ones that caused
 578          * the error, and free their transmit buffers in the upper layers.
579          */
580         for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
581                 u32     ctrl;
582 
583                 desc = macb_tx_desc(queue, tail);
584                 ctrl = desc->ctrl;
585                 tx_skb = macb_tx_skb(queue, tail);
586                 skb = tx_skb->skb;
587 
588                 if (ctrl & MACB_BIT(TX_USED)) {
589                         /* skb is set for the last buffer of the frame */
590                         while (!skb) {
591                                 macb_tx_unmap(bp, tx_skb);
592                                 tail++;
593                                 tx_skb = macb_tx_skb(queue, tail);
594                                 skb = tx_skb->skb;
595                         }
596 
597                         /* ctrl still refers to the first buffer descriptor
598                          * since it's the only one written back by the hardware
599                          */
600                         if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
601                                 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
602                                             macb_tx_ring_wrap(tail), skb->data);
603                                 bp->stats.tx_packets++;
604                                 bp->stats.tx_bytes += skb->len;
605                         }
606                 } else {
607                         /*
608                          * "Buffers exhausted mid-frame" errors may only happen
609                          * if the driver is buggy, so complain loudly about those.
610                          * Statistics are updated by hardware.
611                          */
612                         if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
613                                 netdev_err(bp->dev,
614                                            "BUG: TX buffers exhausted mid-frame\n");
615 
616                         desc->ctrl = ctrl | MACB_BIT(TX_USED);
617                 }
618 
619                 macb_tx_unmap(bp, tx_skb);
620         }
621 
622         /* Set end of TX queue */
623         desc = macb_tx_desc(queue, 0);
624         desc->addr = 0;
625         desc->ctrl = MACB_BIT(TX_USED);
626 
627         /* Make descriptor updates visible to hardware */
628         wmb();
629 
630         /* Reinitialize the TX desc queue */
631         queue_writel(queue, TBQP, queue->tx_ring_dma);
632         /* Make TX ring reflect state of hardware */
633         queue->tx_head = 0;
634         queue->tx_tail = 0;
635 
636         /* Housework before enabling TX IRQ */
637         macb_writel(bp, TSR, macb_readl(bp, TSR));
638         queue_writel(queue, IER, MACB_TX_INT_FLAGS);
639 
640         /* Now we are ready to start transmission again */
641         netif_tx_start_all_queues(bp->dev);
642         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
643 
644         spin_unlock_irqrestore(&bp->lock, flags);
645 }
646 
647 static void macb_tx_interrupt(struct macb_queue *queue)
648 {
649         unsigned int tail;
650         unsigned int head;
651         u32 status;
652         struct macb *bp = queue->bp;
653         u16 queue_index = queue - bp->queues;
654 
655         status = macb_readl(bp, TSR);
656         macb_writel(bp, TSR, status);
657 
658         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
659                 queue_writel(queue, ISR, MACB_BIT(TCOMP));
660 
661         netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
662                 (unsigned long)status);
663 
664         head = queue->tx_head;
665         for (tail = queue->tx_tail; tail != head; tail++) {
666                 struct macb_tx_skb      *tx_skb;
667                 struct sk_buff          *skb;
668                 struct macb_dma_desc    *desc;
669                 u32                     ctrl;
670 
671                 desc = macb_tx_desc(queue, tail);
672 
673                 /* Make hw descriptor updates visible to CPU */
674                 rmb();
675 
676                 ctrl = desc->ctrl;
677 
678                 /* TX_USED bit is only set by hardware on the very first buffer
679                  * descriptor of the transmitted frame.
680                  */
681                 if (!(ctrl & MACB_BIT(TX_USED)))
682                         break;
683 
684                 /* Process all buffers of the current transmitted frame */
685                 for (;; tail++) {
686                         tx_skb = macb_tx_skb(queue, tail);
687                         skb = tx_skb->skb;
688 
689                         /* First, update TX stats if needed */
690                         if (skb) {
691                                 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
692                                             macb_tx_ring_wrap(tail), skb->data);
693                                 bp->stats.tx_packets++;
694                                 bp->stats.tx_bytes += skb->len;
695                         }
696 
697                         /* Now we can safely release resources */
698                         macb_tx_unmap(bp, tx_skb);
699 
700                         /* skb is set only for the last buffer of the frame.
701                          * WARNING: at this point skb has been freed by
702                          * macb_tx_unmap().
703                          */
704                         if (skb)
705                                 break;
706                 }
707         }
708 
709         queue->tx_tail = tail;
710         if (__netif_subqueue_stopped(bp->dev, queue_index) &&
711             CIRC_CNT(queue->tx_head, queue->tx_tail,
712                      TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
713                 netif_wake_subqueue(bp->dev, queue_index);
714 }
715 
716 static void gem_rx_refill(struct macb *bp)
717 {
718         unsigned int            entry;
719         struct sk_buff          *skb;
720         dma_addr_t              paddr;
721 
722         while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
723                 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
724 
725                 /* Make hw descriptor updates visible to CPU */
726                 rmb();
727 
728                 bp->rx_prepared_head++;
729 
730                 if (bp->rx_skbuff[entry] == NULL) {
731                         /* allocate sk_buff for this free entry in ring */
732                         skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
733                         if (unlikely(skb == NULL)) {
734                                 netdev_err(bp->dev,
735                                            "Unable to allocate sk_buff\n");
736                                 break;
737                         }
738 
739                         /* now fill corresponding descriptor entry */
740                         paddr = dma_map_single(&bp->pdev->dev, skb->data,
741                                                bp->rx_buffer_size, DMA_FROM_DEVICE);
742                         if (dma_mapping_error(&bp->pdev->dev, paddr)) {
743                                 dev_kfree_skb(skb);
744                                 break;
745                         }
746 
747                         bp->rx_skbuff[entry] = skb;
748 
749                         if (entry == RX_RING_SIZE - 1)
750                                 paddr |= MACB_BIT(RX_WRAP);
751                         bp->rx_ring[entry].addr = paddr;
752                         bp->rx_ring[entry].ctrl = 0;
753 
754                         /* properly align Ethernet header */
755                         skb_reserve(skb, NET_IP_ALIGN);
756                 } else {
757                         bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
758                         bp->rx_ring[entry].ctrl = 0;
759                 }
760         }
761 
762         /* Make descriptor updates visible to hardware */
763         wmb();
764 
765         netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
766                    bp->rx_prepared_head, bp->rx_tail);
767 }
768 
 769 /* Mark DMA descriptors from begin up to, but not including, end as unused */
770 static void discard_partial_frame(struct macb *bp, unsigned int begin,
771                                   unsigned int end)
772 {
773         unsigned int frag;
774 
775         for (frag = begin; frag != end; frag++) {
776                 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
777                 desc->addr &= ~MACB_BIT(RX_USED);
778         }
779 
780         /* Make descriptor updates visible to hardware */
781         wmb();
782 
783         /*
784          * When this happens, the hardware stats registers for
 785          * whatever caused this are updated, so we don't have to record
786          * anything.
787          */
788 }
789 
790 static int gem_rx(struct macb *bp, int budget)
791 {
792         unsigned int            len;
793         unsigned int            entry;
794         struct sk_buff          *skb;
795         struct macb_dma_desc    *desc;
796         int                     count = 0;
797 
798         while (count < budget) {
799                 u32 addr, ctrl;
800 
801                 entry = macb_rx_ring_wrap(bp->rx_tail);
802                 desc = &bp->rx_ring[entry];
803 
804                 /* Make hw descriptor updates visible to CPU */
805                 rmb();
806 
807                 addr = desc->addr;
808                 ctrl = desc->ctrl;
809 
810                 if (!(addr & MACB_BIT(RX_USED)))
811                         break;
812 
813                 bp->rx_tail++;
814                 count++;
815 
816                 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
817                         netdev_err(bp->dev,
818                                    "not whole frame pointed by descriptor\n");
819                         bp->stats.rx_dropped++;
820                         break;
821                 }
822                 skb = bp->rx_skbuff[entry];
823                 if (unlikely(!skb)) {
824                         netdev_err(bp->dev,
825                                    "inconsistent Rx descriptor chain\n");
826                         bp->stats.rx_dropped++;
827                         break;
828                 }
829                 /* now everything is ready for receiving packet */
830                 bp->rx_skbuff[entry] = NULL;
831                 len = ctrl & bp->rx_frm_len_mask;
832 
833                 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
834 
835                 skb_put(skb, len);
836                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
837                 dma_unmap_single(&bp->pdev->dev, addr,
838                                  bp->rx_buffer_size, DMA_FROM_DEVICE);
839 
840                 skb->protocol = eth_type_trans(skb, bp->dev);
841                 skb_checksum_none_assert(skb);
842                 if (bp->dev->features & NETIF_F_RXCSUM &&
843                     !(bp->dev->flags & IFF_PROMISC) &&
844                     GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
845                         skb->ip_summed = CHECKSUM_UNNECESSARY;
846 
847                 bp->stats.rx_packets++;
848                 bp->stats.rx_bytes += skb->len;
849 
850 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
851                 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
852                             skb->len, skb->csum);
853                 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
854                                skb_mac_header(skb), 16, true);
855                 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
856                                skb->data, 32, true);
857 #endif
858 
859                 netif_receive_skb(skb);
860         }
861 
862         gem_rx_refill(bp);
863 
864         return count;
865 }
866 
867 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
868                          unsigned int last_frag)
869 {
870         unsigned int len;
871         unsigned int frag;
872         unsigned int offset;
873         struct sk_buff *skb;
874         struct macb_dma_desc *desc;
875 
876         desc = macb_rx_desc(bp, last_frag);
877         len = desc->ctrl & bp->rx_frm_len_mask;
878 
879         netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
880                 macb_rx_ring_wrap(first_frag),
881                 macb_rx_ring_wrap(last_frag), len);
882 
883         /*
884          * The ethernet header starts NET_IP_ALIGN bytes into the
885          * first buffer. Since the header is 14 bytes, this makes the
886          * payload word-aligned.
887          *
888          * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
889          * the two padding bytes into the skb so that we avoid hitting
890          * the slowpath in memcpy(), and pull them off afterwards.
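         *
         * (With the usual NET_IP_ALIGN of 2, the 14 byte Ethernet header
         * ends at offset 16, so the IP header that follows stays
         * word-aligned.)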
891          */
892         skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
893         if (!skb) {
894                 bp->stats.rx_dropped++;
895                 for (frag = first_frag; ; frag++) {
896                         desc = macb_rx_desc(bp, frag);
897                         desc->addr &= ~MACB_BIT(RX_USED);
898                         if (frag == last_frag)
899                                 break;
900                 }
901 
902                 /* Make descriptor updates visible to hardware */
903                 wmb();
904 
905                 return 1;
906         }
907 
908         offset = 0;
909         len += NET_IP_ALIGN;
910         skb_checksum_none_assert(skb);
911         skb_put(skb, len);
912 
913         for (frag = first_frag; ; frag++) {
914                 unsigned int frag_len = bp->rx_buffer_size;
915 
916                 if (offset + frag_len > len) {
917                         BUG_ON(frag != last_frag);
918                         frag_len = len - offset;
919                 }
920                 skb_copy_to_linear_data_offset(skb, offset,
921                                 macb_rx_buffer(bp, frag), frag_len);
922                 offset += bp->rx_buffer_size;
923                 desc = macb_rx_desc(bp, frag);
924                 desc->addr &= ~MACB_BIT(RX_USED);
925 
926                 if (frag == last_frag)
927                         break;
928         }
929 
930         /* Make descriptor updates visible to hardware */
931         wmb();
932 
933         __skb_pull(skb, NET_IP_ALIGN);
934         skb->protocol = eth_type_trans(skb, bp->dev);
935 
936         bp->stats.rx_packets++;
937         bp->stats.rx_bytes += skb->len;
938         netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
939                    skb->len, skb->csum);
940         netif_receive_skb(skb);
941 
942         return 0;
943 }
944 
945 static int macb_rx(struct macb *bp, int budget)
946 {
947         int received = 0;
948         unsigned int tail;
949         int first_frag = -1;
950 
951         for (tail = bp->rx_tail; budget > 0; tail++) {
952                 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
953                 u32 addr, ctrl;
954 
955                 /* Make hw descriptor updates visible to CPU */
956                 rmb();
957 
958                 addr = desc->addr;
959                 ctrl = desc->ctrl;
960 
961                 if (!(addr & MACB_BIT(RX_USED)))
962                         break;
963 
964                 if (ctrl & MACB_BIT(RX_SOF)) {
965                         if (first_frag != -1)
966                                 discard_partial_frame(bp, first_frag, tail);
967                         first_frag = tail;
968                 }
969 
970                 if (ctrl & MACB_BIT(RX_EOF)) {
971                         int dropped;
972                         BUG_ON(first_frag == -1);
973 
974                         dropped = macb_rx_frame(bp, first_frag, tail);
975                         first_frag = -1;
976                         if (!dropped) {
977                                 received++;
978                                 budget--;
979                         }
980                 }
981         }
982 
983         if (first_frag != -1)
984                 bp->rx_tail = first_frag;
985         else
986                 bp->rx_tail = tail;
987 
988         return received;
989 }
990 
991 static int macb_poll(struct napi_struct *napi, int budget)
992 {
993         struct macb *bp = container_of(napi, struct macb, napi);
994         int work_done;
995         u32 status;
996 
997         status = macb_readl(bp, RSR);
998         macb_writel(bp, RSR, status);
999 
1000         work_done = 0;
1001 
1002         netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1003                    (unsigned long)status, budget);
1004 
1005         work_done = bp->macbgem_ops.mog_rx(bp, budget);
1006         if (work_done < budget) {
1007                 napi_complete(napi);
1008 
1009                 /* Packets received while interrupts were disabled */
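                /* (Re-reading RSR here closes the race where a frame arrives
                 * after mog_rx() drained the ring but before the RX
                 * interrupts are re-enabled below.)
                 */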
1010                 status = macb_readl(bp, RSR);
1011                 if (status) {
1012                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1013                                 macb_writel(bp, ISR, MACB_BIT(RCOMP));
1014                         napi_reschedule(napi);
1015                 } else {
1016                         macb_writel(bp, IER, MACB_RX_INT_FLAGS);
1017                 }
1018         }
1019 
1020         /* TODO: Handle errors */
1021 
1022         return work_done;
1023 }
1024 
1025 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1026 {
1027         struct macb_queue *queue = dev_id;
1028         struct macb *bp = queue->bp;
1029         struct net_device *dev = bp->dev;
1030         u32 status, ctrl;
1031 
1032         status = queue_readl(queue, ISR);
1033 
1034         if (unlikely(!status))
1035                 return IRQ_NONE;
1036 
1037         spin_lock(&bp->lock);
1038 
1039         while (status) {
1040                 /* close possible race with dev_close */
1041                 if (unlikely(!netif_running(dev))) {
1042                         queue_writel(queue, IDR, -1);
1043                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1044                                 queue_writel(queue, ISR, -1);
1045                         break;
1046                 }
1047 
1048                 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1049                             (unsigned int)(queue - bp->queues),
1050                             (unsigned long)status);
1051 
1052                 if (status & MACB_RX_INT_FLAGS) {
1053                         /*
1054                          * There's no point taking any more interrupts
1055                          * until we have processed the buffers. The
1056                          * scheduling call may fail if the poll routine
1057                          * is already scheduled, so disable interrupts
1058                          * now.
1059                          */
1060                         queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1061                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1062                                 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1063 
1064                         if (napi_schedule_prep(&bp->napi)) {
1065                                 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1066                                 __napi_schedule(&bp->napi);
1067                         }
1068                 }
1069 
1070                 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1071                         queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1072                         schedule_work(&queue->tx_error_task);
1073 
1074                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1075                                 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1076 
1077                         break;
1078                 }
1079 
1080                 if (status & MACB_BIT(TCOMP))
1081                         macb_tx_interrupt(queue);
1082 
1083                 /*
1084                  * Link change detection isn't possible with RMII, so we'll
1085                  * add that if/when we get our hands on a full-blown MII PHY.
1086                  */
1087 
1088                 /* There is a hardware issue under heavy load where DMA can
1089                  * stop, this causes endless "used buffer descriptor read"
1090                  * interrupts but it can be cleared by re-enabling RX. See
1091                  * the at91 manual, section 41.3.1 or the Zynq manual
1092                  * section 16.7.4 for details.
1093                  */
1094                 if (status & MACB_BIT(RXUBR)) {
1095                         ctrl = macb_readl(bp, NCR);
1096                         macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1097                         macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1098 
1099                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1100                                 macb_writel(bp, ISR, MACB_BIT(RXUBR));
1101                 }
1102 
1103                 if (status & MACB_BIT(ISR_ROVR)) {
1104                         /* We missed at least one packet */
1105                         if (macb_is_gem(bp))
1106                                 bp->hw_stats.gem.rx_overruns++;
1107                         else
1108                                 bp->hw_stats.macb.rx_overruns++;
1109 
1110                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1111                                 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1112                 }
1113 
1114                 if (status & MACB_BIT(HRESP)) {
1115                         /*
1116                          * TODO: Reset the hardware, and maybe move the
1117                          * netdev_err to a lower-priority context as well
1118                          * (work queue?)
1119                          */
1120                         netdev_err(dev, "DMA bus error: HRESP not OK\n");
1121 
1122                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1123                                 queue_writel(queue, ISR, MACB_BIT(HRESP));
1124                 }
1125 
1126                 status = queue_readl(queue, ISR);
1127         }
1128 
1129         spin_unlock(&bp->lock);
1130 
1131         return IRQ_HANDLED;
1132 }
1133 
1134 #ifdef CONFIG_NET_POLL_CONTROLLER
1135 /*
1136  * Polling receive - used by netconsole and other diagnostic tools
1137  * to allow network i/o with interrupts disabled.
1138  */
1139 static void macb_poll_controller(struct net_device *dev)
1140 {
1141         struct macb *bp = netdev_priv(dev);
1142         struct macb_queue *queue;
1143         unsigned long flags;
1144         unsigned int q;
1145 
1146         local_irq_save(flags);
1147         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1148                 macb_interrupt(dev->irq, queue);
1149         local_irq_restore(flags);
1150 }
1151 #endif
1152 
1153 static unsigned int macb_tx_map(struct macb *bp,
1154                                 struct macb_queue *queue,
1155                                 struct sk_buff *skb)
1156 {
1157         dma_addr_t mapping;
1158         unsigned int len, entry, i, tx_head = queue->tx_head;
1159         struct macb_tx_skb *tx_skb = NULL;
1160         struct macb_dma_desc *desc;
1161         unsigned int offset, size, count = 0;
1162         unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1163         unsigned int eof = 1;
1164         u32 ctrl;
1165 
1166         /* First, map non-paged data */
1167         len = skb_headlen(skb);
1168         offset = 0;
1169         while (len) {
1170                 size = min(len, bp->max_tx_length);
1171                 entry = macb_tx_ring_wrap(tx_head);
1172                 tx_skb = &queue->tx_skb[entry];
1173 
1174                 mapping = dma_map_single(&bp->pdev->dev,
1175                                          skb->data + offset,
1176                                          size, DMA_TO_DEVICE);
1177                 if (dma_mapping_error(&bp->pdev->dev, mapping))
1178                         goto dma_error;
1179 
1180                 /* Save info to properly release resources */
1181                 tx_skb->skb = NULL;
1182                 tx_skb->mapping = mapping;
1183                 tx_skb->size = size;
1184                 tx_skb->mapped_as_page = false;
1185 
1186                 len -= size;
1187                 offset += size;
1188                 count++;
1189                 tx_head++;
1190         }
1191 
1192         /* Then, map paged data from fragments */
1193         for (f = 0; f < nr_frags; f++) {
1194                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1195 
1196                 len = skb_frag_size(frag);
1197                 offset = 0;
1198                 while (len) {
1199                         size = min(len, bp->max_tx_length);
1200                         entry = macb_tx_ring_wrap(tx_head);
1201                         tx_skb = &queue->tx_skb[entry];
1202 
1203                         mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1204                                                    offset, size, DMA_TO_DEVICE);
1205                         if (dma_mapping_error(&bp->pdev->dev, mapping))
1206                                 goto dma_error;
1207 
1208                         /* Save info to properly release resources */
1209                         tx_skb->skb = NULL;
1210                         tx_skb->mapping = mapping;
1211                         tx_skb->size = size;
1212                         tx_skb->mapped_as_page = true;
1213 
1214                         len -= size;
1215                         offset += size;
1216                         count++;
1217                         tx_head++;
1218                 }
1219         }
1220 
1221         /* Should never happen */
1222         if (unlikely(tx_skb == NULL)) {
1223                 netdev_err(bp->dev, "BUG! empty skb!\n");
1224                 return 0;
1225         }
1226 
1227         /* This is the last buffer of the frame: save socket buffer */
1228         tx_skb->skb = skb;
1229 
1230         /* Update TX ring: update buffer descriptors in reverse order
1231          * to avoid race condition
1232          */
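        /* Writing the descriptors back to front means the first buffer
         * descriptor's TX_USED bit is cleared last (each write paired with
         * the wmb() below), so the controller never sees a partially
         * initialized frame.
         */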
1233 
1234         /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1235          * to set the end of TX queue
1236          */
1237         i = tx_head;
1238         entry = macb_tx_ring_wrap(i);
1239         ctrl = MACB_BIT(TX_USED);
1240         desc = &queue->tx_ring[entry];
1241         desc->ctrl = ctrl;
1242 
1243         do {
1244                 i--;
1245                 entry = macb_tx_ring_wrap(i);
1246                 tx_skb = &queue->tx_skb[entry];
1247                 desc = &queue->tx_ring[entry];
1248 
1249                 ctrl = (u32)tx_skb->size;
1250                 if (eof) {
1251                         ctrl |= MACB_BIT(TX_LAST);
1252                         eof = 0;
1253                 }
1254                 if (unlikely(entry == (TX_RING_SIZE - 1)))
1255                         ctrl |= MACB_BIT(TX_WRAP);
1256 
1257                 /* Set TX buffer descriptor */
1258                 desc->addr = tx_skb->mapping;
1259                 /* desc->addr must be visible to hardware before clearing
1260                  * 'TX_USED' bit in desc->ctrl.
1261                  */
1262                 wmb();
1263                 desc->ctrl = ctrl;
1264         } while (i != queue->tx_head);
1265 
1266         queue->tx_head = tx_head;
1267 
1268         return count;
1269 
1270 dma_error:
1271         netdev_err(bp->dev, "TX DMA map failed\n");
1272 
1273         for (i = queue->tx_head; i != tx_head; i++) {
1274                 tx_skb = macb_tx_skb(queue, i);
1275 
1276                 macb_tx_unmap(bp, tx_skb);
1277         }
1278 
1279         return 0;
1280 }
1281 
1282 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1283 {
1284         u16 queue_index = skb_get_queue_mapping(skb);
1285         struct macb *bp = netdev_priv(dev);
1286         struct macb_queue *queue = &bp->queues[queue_index];
1287         unsigned long flags;
1288         unsigned int count, nr_frags, frag_size, f;
1289 
1290 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1291         netdev_vdbg(bp->dev,
1292                    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1293                    queue_index, skb->len, skb->head, skb->data,
1294                    skb_tail_pointer(skb), skb_end_pointer(skb));
1295         print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1296                        skb->data, 16, true);
1297 #endif
1298 
1299         /* Count how many TX buffer descriptors are needed to send this
1300          * socket buffer: skb fragments of jumbo frames may need to be
1301          * split across several buffer descriptors.
1302          */
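        /* For instance, a 1500 byte linear area plus one 3000 byte fragment
         * on a MACB with a max_tx_length of 2047 needs 1 + 2 = 3 descriptors.
         */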
1303         count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1304         nr_frags = skb_shinfo(skb)->nr_frags;
1305         for (f = 0; f < nr_frags; f++) {
1306                 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1307                 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1308         }
1309 
1310         spin_lock_irqsave(&bp->lock, flags);
1311 
1312         /* This is a hard error, log it. */
1313         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
1314                 netif_stop_subqueue(dev, queue_index);
1315                 spin_unlock_irqrestore(&bp->lock, flags);
1316                 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1317                            queue->tx_head, queue->tx_tail);
1318                 return NETDEV_TX_BUSY;
1319         }
1320 
1321         /* Map socket buffer for DMA transfer */
1322         if (!macb_tx_map(bp, queue, skb)) {
1323                 dev_kfree_skb_any(skb);
1324                 goto unlock;
1325         }
1326 
1327         /* Make newly initialized descriptor visible to hardware */
1328         wmb();
1329 
1330         skb_tx_timestamp(skb);
1331 
1332         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1333 
1334         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
1335                 netif_stop_subqueue(dev, queue_index);
1336 
1337 unlock:
1338         spin_unlock_irqrestore(&bp->lock, flags);
1339 
1340         return NETDEV_TX_OK;
1341 }
1342 
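/* For instance, a 1522 byte buffer (1500 byte MTU plus Ethernet header,
 * VLAN tag and FCS) is not a multiple of 64 and gets rounded up to 1536
 * below.
 */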
1343 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1344 {
1345         if (!macb_is_gem(bp)) {
1346                 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1347         } else {
1348                 bp->rx_buffer_size = size;
1349 
1350                 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1351                         netdev_dbg(bp->dev,
1352                                     "RX buffer must be multiple of %d bytes, expanding\n",
1353                                     RX_BUFFER_MULTIPLE);
1354                         bp->rx_buffer_size =
1355                                 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1356                 }
1357         }
1358 
1359         netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
1360                    bp->dev->mtu, bp->rx_buffer_size);
1361 }
1362 
1363 static void gem_free_rx_buffers(struct macb *bp)
1364 {
1365         struct sk_buff          *skb;
1366         struct macb_dma_desc    *desc;
1367         dma_addr_t              addr;
1368         int i;
1369 
1370         if (!bp->rx_skbuff)
1371                 return;
1372 
1373         for (i = 0; i < RX_RING_SIZE; i++) {
1374                 skb = bp->rx_skbuff[i];
1375 
1376                 if (skb == NULL)
1377                         continue;
1378 
1379                 desc = &bp->rx_ring[i];
1380                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1381                 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1382                                  DMA_FROM_DEVICE);
1383                 dev_kfree_skb_any(skb);
1384                 skb = NULL;
1385         }
1386 
1387         kfree(bp->rx_skbuff);
1388         bp->rx_skbuff = NULL;
1389 }
1390 
1391 static void macb_free_rx_buffers(struct macb *bp)
1392 {
1393         if (bp->rx_buffers) {
1394                 dma_free_coherent(&bp->pdev->dev,
1395                                   RX_RING_SIZE * bp->rx_buffer_size,
1396                                   bp->rx_buffers, bp->rx_buffers_dma);
1397                 bp->rx_buffers = NULL;
1398         }
1399 }
1400 
1401 static void macb_free_consistent(struct macb *bp)
1402 {
1403         struct macb_queue *queue;
1404         unsigned int q;
1405 
1406         bp->macbgem_ops.mog_free_rx_buffers(bp);
1407         if (bp->rx_ring) {
1408                 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1409                                   bp->rx_ring, bp->rx_ring_dma);
1410                 bp->rx_ring = NULL;
1411         }
1412 
1413         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1414                 kfree(queue->tx_skb);
1415                 queue->tx_skb = NULL;
1416                 if (queue->tx_ring) {
1417                         dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1418                                           queue->tx_ring, queue->tx_ring_dma);
1419                         queue->tx_ring = NULL;
1420                 }
1421         }
1422 }
1423 
1424 static int gem_alloc_rx_buffers(struct macb *bp)
1425 {
1426         int size;
1427 
1428         size = RX_RING_SIZE * sizeof(struct sk_buff *);
1429         bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1430         if (!bp->rx_skbuff)
1431                 return -ENOMEM;
1432         else
1433                 netdev_dbg(bp->dev,
1434                            "Allocated %d RX struct sk_buff entries at %p\n",
1435                            RX_RING_SIZE, bp->rx_skbuff);
1436         return 0;
1437 }
1438 
1439 static int macb_alloc_rx_buffers(struct macb *bp)
1440 {
1441         int size;
1442 
1443         size = RX_RING_SIZE * bp->rx_buffer_size;
1444         bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1445                                             &bp->rx_buffers_dma, GFP_KERNEL);
1446         if (!bp->rx_buffers)
1447                 return -ENOMEM;
1448         else
1449                 netdev_dbg(bp->dev,
1450                            "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1451                            size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1452         return 0;
1453 }
1454 
1455 static int macb_alloc_consistent(struct macb *bp)
1456 {
1457         struct macb_queue *queue;
1458         unsigned int q;
1459         int size;
1460 
1461         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1462                 size = TX_RING_BYTES;
1463                 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1464                                                     &queue->tx_ring_dma,
1465                                                     GFP_KERNEL);
1466                 if (!queue->tx_ring)
1467                         goto out_err;
1468                 netdev_dbg(bp->dev,
1469                            "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1470                            q, size, (unsigned long)queue->tx_ring_dma,
1471                            queue->tx_ring);
1472 
1473                 size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
1474                 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1475                 if (!queue->tx_skb)
1476                         goto out_err;
1477         }
1478 
1479         size = RX_RING_BYTES;
1480         bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1481                                          &bp->rx_ring_dma, GFP_KERNEL);
1482         if (!bp->rx_ring)
1483                 goto out_err;
1484         netdev_dbg(bp->dev,
1485                    "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1486                    size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1487 
1488         if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1489                 goto out_err;
1490 
1491         return 0;
1492 
1493 out_err:
1494         macb_free_consistent(bp);
1495         return -ENOMEM;
1496 }
1497 
1498 static void gem_init_rings(struct macb *bp)
1499 {
1500         struct macb_queue *queue;
1501         unsigned int q;
1502         int i;
1503 
1504         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1505                 for (i = 0; i < TX_RING_SIZE; i++) {
1506                         queue->tx_ring[i].addr = 0;
1507                         queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1508                 }
1509                 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1510                 queue->tx_head = 0;
1511                 queue->tx_tail = 0;
1512         }
1513 
1514         bp->rx_tail = 0;
1515         bp->rx_prepared_head = 0;
1516 
1517         gem_rx_refill(bp);
1518 }
1519 
1520 static void macb_init_rings(struct macb *bp)
1521 {
1522         int i;
1523         dma_addr_t addr;
1524 
1525         addr = bp->rx_buffers_dma;
1526         for (i = 0; i < RX_RING_SIZE; i++) {
1527                 bp->rx_ring[i].addr = addr;
1528                 bp->rx_ring[i].ctrl = 0;
1529                 addr += bp->rx_buffer_size;
1530         }
1531         bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
1532 
1533         for (i = 0; i < TX_RING_SIZE; i++) {
1534                 bp->queues[0].tx_ring[i].addr = 0;
1535                 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
1536         }
1537         bp->queues[0].tx_head = 0;
1538         bp->queues[0].tx_tail = 0;
1539         bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1540 
1541         bp->rx_tail = 0;
1542 }
1543 
1544 static void macb_reset_hw(struct macb *bp)
1545 {
1546         struct macb_queue *queue;
1547         unsigned int q;
1548 
1549         /*
1550          * Disable RX and TX (XXX: Should we halt the transmission
1551          * more gracefully?)
1552          */
1553         macb_writel(bp, NCR, 0);
1554 
1555         /* Clear the stats registers (XXX: Update stats first?) */
1556         macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1557 
1558         /* Clear all status flags */
1559         macb_writel(bp, TSR, -1);
1560         macb_writel(bp, RSR, -1);
1561 
1562         /* Disable all interrupts */
1563         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1564                 queue_writel(queue, IDR, -1);
1565                 queue_readl(queue, ISR);
1566                 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1567                         queue_writel(queue, ISR, -1);
1568         }
1569 }
1570 
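/*
 * Editorial note on divisor selection: each divisor below keeps the MDIO
 * management clock at or below the 2.5 MHz maximum of IEEE 802.3 clause 22,
 * e.g. a 120 MHz pclk divided by 48 gives exactly 2.5 MHz.
 */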
1571 static u32 gem_mdc_clk_div(struct macb *bp)
1572 {
1573         u32 config;
1574         unsigned long pclk_hz = clk_get_rate(bp->pclk);
1575 
1576         if (pclk_hz <= 20000000)
1577                 config = GEM_BF(CLK, GEM_CLK_DIV8);
1578         else if (pclk_hz <= 40000000)
1579                 config = GEM_BF(CLK, GEM_CLK_DIV16);
1580         else if (pclk_hz <= 80000000)
1581                 config = GEM_BF(CLK, GEM_CLK_DIV32);
1582         else if (pclk_hz <= 120000000)
1583                 config = GEM_BF(CLK, GEM_CLK_DIV48);
1584         else if (pclk_hz <= 160000000)
1585                 config = GEM_BF(CLK, GEM_CLK_DIV64);
1586         else
1587                 config = GEM_BF(CLK, GEM_CLK_DIV96);
1588 
1589         return config;
1590 }
1591 
1592 static u32 macb_mdc_clk_div(struct macb *bp)
1593 {
1594         u32 config;
1595         unsigned long pclk_hz;
1596 
1597         if (macb_is_gem(bp))
1598                 return gem_mdc_clk_div(bp);
1599 
1600         pclk_hz = clk_get_rate(bp->pclk);
1601         if (pclk_hz <= 20000000)
1602                 config = MACB_BF(CLK, MACB_CLK_DIV8);
1603         else if (pclk_hz <= 40000000)
1604                 config = MACB_BF(CLK, MACB_CLK_DIV16);
1605         else if (pclk_hz <= 80000000)
1606                 config = MACB_BF(CLK, MACB_CLK_DIV32);
1607         else
1608                 config = MACB_BF(CLK, MACB_CLK_DIV64);
1609 
1610         return config;
1611 }
1612 
1613 /*
1614  * Get the DMA bus width field of the network configuration register that we
1615  * should program.  We derive the width by decoding the design configuration
1616  * register, which reports the maximum supported data bus width.
1617  */
1618 static u32 macb_dbw(struct macb *bp)
1619 {
1620         if (!macb_is_gem(bp))
1621                 return 0;
1622 
1623         switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1624         case 4:
1625                 return GEM_BF(DBW, GEM_DBW128);
1626         case 2:
1627                 return GEM_BF(DBW, GEM_DBW64);
1628         case 1:
1629         default:
1630                 return GEM_BF(DBW, GEM_DBW32);
1631         }
1632 }
1633 
1634 /*
1635  * Configure the receive DMA engine
1636  * - use the correct receive buffer size
1637  * - set best burst length for DMA operations
1638  *   (if not supported by the FIFO, it will fall back to the default)
1639  * - set both rx/tx packet buffers to full memory size
1640  * These are configurable parameters for GEM.
1641  */
1642 static void macb_configure_dma(struct macb *bp)
1643 {
1644         u32 dmacfg;
1645 
1646         if (macb_is_gem(bp)) {
1647                 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1648                 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1649                 if (bp->dma_burst_length)
1650                         dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
1651                 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1652                 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1653 
1654                 if (bp->native_io)
1655                         dmacfg &= ~GEM_BIT(ENDIA_DESC);
1656                 else
1657                         dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1658 
1659                 if (bp->dev->features & NETIF_F_HW_CSUM)
1660                         dmacfg |= GEM_BIT(TXCOEN);
1661                 else
1662                         dmacfg &= ~GEM_BIT(TXCOEN);
1663                 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1664                            dmacfg);
1665                 gem_writel(bp, DMACFG, dmacfg);
1666         }
1667 }
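
/*
 * Worked example (illustrative only, assuming macb_init_rx_buffer_size()
 * has rounded the buffer size up to a multiple of RX_BUFFER_MULTIPLE):
 * for a 1500-byte MTU, macb_open() computes 1500 + 14 + 4 + 2 = 1520
 * bytes, rounded up to 1536, so the RXBS field above is programmed with
 * 1536 / RX_BUFFER_MULTIPLE = 1536 / 64 = 24.
 */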
1668 
1669 static void macb_init_hw(struct macb *bp)
1670 {
1671         struct macb_queue *queue;
1672         unsigned int q;
1673 
1674         u32 config;
1675 
1676         macb_reset_hw(bp);
1677         macb_set_hwaddr(bp);
1678 
1679         config = macb_mdc_clk_div(bp);
1680         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1681                 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
1682         config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
1683         config |= MACB_BIT(PAE);                /* PAuse Enable */
1684         config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
1685         if (bp->caps & MACB_CAPS_JUMBO)
1686                 config |= MACB_BIT(JFRAME);     /* Enable jumbo frames */
1687         else
1688                 config |= MACB_BIT(BIG);        /* Receive oversized frames */
1689         if (bp->dev->flags & IFF_PROMISC)
1690                 config |= MACB_BIT(CAF);        /* Copy All Frames */
1691         else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1692                 config |= GEM_BIT(RXCOEN);
1693         if (!(bp->dev->flags & IFF_BROADCAST))
1694                 config |= MACB_BIT(NBC);        /* No BroadCast */
1695         config |= macb_dbw(bp);
1696         macb_writel(bp, NCFGR, config);
1697         if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
1698                 gem_writel(bp, JML, bp->jumbo_max_len);
1699         bp->speed = SPEED_10;
1700         bp->duplex = DUPLEX_HALF;
1701         bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
1702         if (bp->caps & MACB_CAPS_JUMBO)
1703                 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
1704 
1705         macb_configure_dma(bp);
1706 
1707         /* Initialize TX and RX buffers */
1708         macb_writel(bp, RBQP, bp->rx_ring_dma);
1709         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1710                 queue_writel(queue, TBQP, queue->tx_ring_dma);
1711 
1712                 /* Enable interrupts */
1713                 queue_writel(queue, IER,
1714                              MACB_RX_INT_FLAGS |
1715                              MACB_TX_INT_FLAGS |
1716                              MACB_BIT(HRESP));
1717         }
1718 
1719         /* Enable TX and RX */
1720         macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1721 }
1722 
1723 /*
1724  * The hash address register is 64 bits long and takes up two
1725  * locations in the memory map.  The least significant bits are stored
1726  * in EMAC_HSL and the most significant bits in EMAC_HSH.
1727  *
1728  * The unicast hash enable and the multicast hash enable bits in the
1729  * network configuration register enable the reception of hash matched
1730  * frames. The destination address is reduced to a 6 bit index into
1731  * the 64 bit hash register using the following hash function.  The
1732  * hash function is an exclusive or of every sixth bit of the
1733  * destination address.
1734  *
1735  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
1736  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
1737  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
1738  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
1739  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
1740  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
1741  *
1742  * da[0] represents the least significant bit of the first byte
1743  * received, that is, the multicast/unicast indicator, and da[47]
1744  * represents the most significant bit of the last byte received.  If
1745  * the hash index, hi[n], points to a bit that is set in the hash
1746  * register then the frame will be matched according to whether the
1747  * frame is multicast or unicast.  A multicast match will be signalled
1748  * if the multicast hash enable bit is set, da[0] is 1 and the hash
1749  * index points to a bit set in the hash register.  A unicast match
1750  * will be signalled if the unicast hash enable bit is set, da[0] is 0
1751  * and the hash index points to a bit set in the hash register.  To
1752  * receive all multicast frames, the hash register should be set with
1753  * all ones and the multicast hash enable bit should be set in the
1754  * network configuration register.
1755  */
1756 
1757 static inline int hash_bit_value(int bitnr, __u8 *addr)
1758 {
1759         if (addr[bitnr / 8] & (1 << (bitnr % 8)))
1760                 return 1;
1761         return 0;
1762 }
1763 
1764 /*
1765  * Return the hash index value for the specified address.
1766  */
1767 static int hash_get_index(__u8 *addr)
1768 {
1769         int i, j, bitval;
1770         int hash_index = 0;
1771 
1772         for (j = 0; j < 6; j++) {
1773                 for (i = 0, bitval = 0; i < 8; i++)
1774                         bitval ^= hash_bit_value(i * 6 + j, addr);
1775 
1776                 hash_index |= (bitval << j);
1777         }
1778 
1779         return hash_index;
1780 }
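
/*
 * Worked example (illustrative only, not driver code): for the IPv4
 * all-hosts multicast address 01:00:5e:00:00:01 the set da[] bits are
 * 0, 17, 18, 19, 20, 22 and 40.  Folding each set bit n into index bit
 * n % 6 gives
 *
 *   hi[0] = da[0] ^ da[18] = 0,  hi[1] = da[19] = 1,  hi[2] = da[20] = 1,
 *   hi[3] = 0,  hi[4] = da[22] ^ da[40] = 0,  hi[5] = da[17] = 1,
 *
 * so hash_get_index() returns 0b100110 = 38, and macb_sethashtable()
 * below sets bit 38 & 31 = 6 of mc_filter[38 >> 5] = mc_filter[1],
 * which ends up in the HRT register.
 */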
1781 
1782 /*
1783  * Add multicast addresses to the internal multicast-hash table.
1784  */
1785 static void macb_sethashtable(struct net_device *dev)
1786 {
1787         struct netdev_hw_addr *ha;
1788         unsigned long mc_filter[2];
1789         unsigned int bitnr;
1790         struct macb *bp = netdev_priv(dev);
1791 
1792         mc_filter[0] = mc_filter[1] = 0;
1793 
1794         netdev_for_each_mc_addr(ha, dev) {
1795                 bitnr = hash_get_index(ha->addr);
1796                 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1797         }
1798 
1799         macb_or_gem_writel(bp, HRB, mc_filter[0]);
1800         macb_or_gem_writel(bp, HRT, mc_filter[1]);
1801 }
1802 
1803 /*
1804  * Enable/Disable promiscuous and multicast modes.
1805  */
1806 static void macb_set_rx_mode(struct net_device *dev)
1807 {
1808         unsigned long cfg;
1809         struct macb *bp = netdev_priv(dev);
1810 
1811         cfg = macb_readl(bp, NCFGR);
1812 
1813         if (dev->flags & IFF_PROMISC) {
1814                 /* Enable promiscuous mode */
1815                 cfg |= MACB_BIT(CAF);
1816 
1817                 /* Disable RX checksum offload */
1818                 if (macb_is_gem(bp))
1819                         cfg &= ~GEM_BIT(RXCOEN);
1820         } else {
1821                 /* Disable promiscuous mode */
1822                 cfg &= ~MACB_BIT(CAF);
1823 
1824                 /* Enable RX checksum offload only if requested */
1825                 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
1826                         cfg |= GEM_BIT(RXCOEN);
1827         }
1828 
1829         if (dev->flags & IFF_ALLMULTI) {
1830                 /* Enable all multicast mode */
1831                 macb_or_gem_writel(bp, HRB, -1);
1832                 macb_or_gem_writel(bp, HRT, -1);
1833                 cfg |= MACB_BIT(NCFGR_MTI);
1834         } else if (!netdev_mc_empty(dev)) {
1835                 /* Enable specific multicasts */
1836                 macb_sethashtable(dev);
1837                 cfg |= MACB_BIT(NCFGR_MTI);
1838         } else if (dev->flags & (~IFF_ALLMULTI)) {
1839                 /* Disable all multicast mode */
1840                 macb_or_gem_writel(bp, HRB, 0);
1841                 macb_or_gem_writel(bp, HRT, 0);
1842                 cfg &= ~MACB_BIT(NCFGR_MTI);
1843         }
1844 
1845         macb_writel(bp, NCFGR, cfg);
1846 }
1847 
1848 static int macb_open(struct net_device *dev)
1849 {
1850         struct macb *bp = netdev_priv(dev);
1851         size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
1852         int err;
1853 
1854         netdev_dbg(bp->dev, "open\n");
1855 
1856         /* carrier starts down */
1857         netif_carrier_off(dev);
1858 
1859         /* if the PHY is not yet registered, retry later */
1860         if (!bp->phy_dev)
1861                 return -EAGAIN;
1862 
1863         /* RX buffers initialization */
1864         macb_init_rx_buffer_size(bp, bufsz);
1865 
1866         err = macb_alloc_consistent(bp);
1867         if (err) {
1868                 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
1869                            err);
1870                 return err;
1871         }
1872 
1873         napi_enable(&bp->napi);
1874 
1875         bp->macbgem_ops.mog_init_rings(bp);
1876         macb_init_hw(bp);
1877 
1878         /* schedule a link state check */
1879         phy_start(bp->phy_dev);
1880 
1881         netif_tx_start_all_queues(dev);
1882 
1883         return 0;
1884 }
1885 
1886 static int macb_close(struct net_device *dev)
1887 {
1888         struct macb *bp = netdev_priv(dev);
1889         unsigned long flags;
1890 
1891         netif_tx_stop_all_queues(dev);
1892         napi_disable(&bp->napi);
1893 
1894         if (bp->phy_dev)
1895                 phy_stop(bp->phy_dev);
1896 
1897         spin_lock_irqsave(&bp->lock, flags);
1898         macb_reset_hw(bp);
1899         netif_carrier_off(dev);
1900         spin_unlock_irqrestore(&bp->lock, flags);
1901 
1902         macb_free_consistent(bp);
1903 
1904         return 0;
1905 }
1906 
1907 static int macb_change_mtu(struct net_device *dev, int new_mtu)
1908 {
1909         struct macb *bp = netdev_priv(dev);
1910         u32 max_mtu;
1911 
1912         if (netif_running(dev))
1913                 return -EBUSY;
1914 
1915         max_mtu = ETH_DATA_LEN;
1916         if (bp->caps & MACB_CAPS_JUMBO)
1917                 max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
1918 
1919         if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
1920                 return -EINVAL;
1921 
1922         dev->mtu = new_mtu;
1923 
1924         return 0;
1925 }
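
/*
 * Example (editorial, illustrative only): on a JUMBO-capable GEM whose JML
 * register was programmed with the zynqmp default of 10240 (see
 * zynqmp_config below), the largest accepted MTU is
 * 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222 bytes.
 */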
1926 
1927 static void gem_update_stats(struct macb *bp)
1928 {
1929         unsigned int i;
1930         u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1931 
1932         for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1933                 u32 offset = gem_statistics[i].offset;
1934                 u64 val = bp->macb_reg_readl(bp, offset);
1935 
1936                 bp->ethtool_stats[i] += val;
1937                 *p += val;
1938 
1939                 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1940                         /* Add GEM_OCTTXH, GEM_OCTRXH */
1941                         val = bp->macb_reg_readl(bp, offset + 4);
1942                         bp->ethtool_stats[i] += ((u64)val) << 32;
1943                         *(++p) += val;
1944                 }
1945         }
1946 }
1947 
1948 static struct net_device_stats *gem_get_stats(struct macb *bp)
1949 {
1950         struct gem_stats *hwstat = &bp->hw_stats.gem;
1951         struct net_device_stats *nstat = &bp->stats;
1952 
1953         gem_update_stats(bp);
1954 
1955         nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
1956                             hwstat->rx_alignment_errors +
1957                             hwstat->rx_resource_errors +
1958                             hwstat->rx_overruns +
1959                             hwstat->rx_oversize_frames +
1960                             hwstat->rx_jabbers +
1961                             hwstat->rx_undersized_frames +
1962                             hwstat->rx_length_field_frame_errors);
1963         nstat->tx_errors = (hwstat->tx_late_collisions +
1964                             hwstat->tx_excessive_collisions +
1965                             hwstat->tx_underrun +
1966                             hwstat->tx_carrier_sense_errors);
1967         nstat->multicast = hwstat->rx_multicast_frames;
1968         nstat->collisions = (hwstat->tx_single_collision_frames +
1969                              hwstat->tx_multiple_collision_frames +
1970                              hwstat->tx_excessive_collisions);
1971         nstat->rx_length_errors = (hwstat->rx_oversize_frames +
1972                                    hwstat->rx_jabbers +
1973                                    hwstat->rx_undersized_frames +
1974                                    hwstat->rx_length_field_frame_errors);
1975         nstat->rx_over_errors = hwstat->rx_resource_errors;
1976         nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
1977         nstat->rx_frame_errors = hwstat->rx_alignment_errors;
1978         nstat->rx_fifo_errors = hwstat->rx_overruns;
1979         nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
1980         nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
1981         nstat->tx_fifo_errors = hwstat->tx_underrun;
1982 
1983         return nstat;
1984 }
1985 
1986 static void gem_get_ethtool_stats(struct net_device *dev,
1987                                   struct ethtool_stats *stats, u64 *data)
1988 {
1989         struct macb *bp;
1990 
1991         bp = netdev_priv(dev);
1992         gem_update_stats(bp);
1993         memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
1994 }
1995 
1996 static int gem_get_sset_count(struct net_device *dev, int sset)
1997 {
1998         switch (sset) {
1999         case ETH_SS_STATS:
2000                 return GEM_STATS_LEN;
2001         default:
2002                 return -EOPNOTSUPP;
2003         }
2004 }
2005 
2006 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2007 {
2008         unsigned int i;
2009 
2010         switch (sset) {
2011         case ETH_SS_STATS:
2012                 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2013                         memcpy(p, gem_statistics[i].stat_string,
2014                                ETH_GSTRING_LEN);
2015                 break;
2016         }
2017 }
2018 
2019 static struct net_device_stats *macb_get_stats(struct net_device *dev)
2020 {
2021         struct macb *bp = netdev_priv(dev);
2022         struct net_device_stats *nstat = &bp->stats;
2023         struct macb_stats *hwstat = &bp->hw_stats.macb;
2024 
2025         if (macb_is_gem(bp))
2026                 return gem_get_stats(bp);
2027 
2028         /* read stats from hardware */
2029         macb_update_stats(bp);
2030 
2031         /* Convert HW stats into netdevice stats */
2032         nstat->rx_errors = (hwstat->rx_fcs_errors +
2033                             hwstat->rx_align_errors +
2034                             hwstat->rx_resource_errors +
2035                             hwstat->rx_overruns +
2036                             hwstat->rx_oversize_pkts +
2037                             hwstat->rx_jabbers +
2038                             hwstat->rx_undersize_pkts +
2039                             hwstat->rx_length_mismatch);
2040         nstat->tx_errors = (hwstat->tx_late_cols +
2041                             hwstat->tx_excessive_cols +
2042                             hwstat->tx_underruns +
2043                             hwstat->tx_carrier_errors +
2044                             hwstat->sqe_test_errors);
2045         nstat->collisions = (hwstat->tx_single_cols +
2046                              hwstat->tx_multiple_cols +
2047                              hwstat->tx_excessive_cols);
2048         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2049                                    hwstat->rx_jabbers +
2050                                    hwstat->rx_undersize_pkts +
2051                                    hwstat->rx_length_mismatch);
2052         nstat->rx_over_errors = hwstat->rx_resource_errors +
2053                                    hwstat->rx_overruns;
2054         nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2055         nstat->rx_frame_errors = hwstat->rx_align_errors;
2056         nstat->rx_fifo_errors = hwstat->rx_overruns;
2057         /* XXX: What does "missed" mean? */
2058         nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2059         nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2060         nstat->tx_fifo_errors = hwstat->tx_underruns;
2061         /* Don't know about heartbeat or window errors... */
2062 
2063         return nstat;
2064 }
2065 
2066 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2067 {
2068         struct macb *bp = netdev_priv(dev);
2069         struct phy_device *phydev = bp->phy_dev;
2070 
2071         if (!phydev)
2072                 return -ENODEV;
2073 
2074         return phy_ethtool_gset(phydev, cmd);
2075 }
2076 
2077 static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2078 {
2079         struct macb *bp = netdev_priv(dev);
2080         struct phy_device *phydev = bp->phy_dev;
2081 
2082         if (!phydev)
2083                 return -ENODEV;
2084 
2085         return phy_ethtool_sset(phydev, cmd);
2086 }
2087 
2088 static int macb_get_regs_len(struct net_device *netdev)
2089 {
2090         return MACB_GREGS_NBR * sizeof(u32);
2091 }
2092 
2093 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2094                           void *p)
2095 {
2096         struct macb *bp = netdev_priv(dev);
2097         unsigned int tail, head;
2098         u32 *regs_buff = p;
2099 
2100         regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2101                         | MACB_GREGS_VERSION;
2102 
2103         tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
2104         head = macb_tx_ring_wrap(bp->queues[0].tx_head);
2105 
2106         regs_buff[0]  = macb_readl(bp, NCR);
2107         regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
2108         regs_buff[2]  = macb_readl(bp, NSR);
2109         regs_buff[3]  = macb_readl(bp, TSR);
2110         regs_buff[4]  = macb_readl(bp, RBQP);
2111         regs_buff[5]  = macb_readl(bp, TBQP);
2112         regs_buff[6]  = macb_readl(bp, RSR);
2113         regs_buff[7]  = macb_readl(bp, IMR);
2114 
2115         regs_buff[8]  = tail;
2116         regs_buff[9]  = head;
2117         regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2118         regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2119 
2120         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2121                 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2122         if (macb_is_gem(bp)) {
2123                 regs_buff[13] = gem_readl(bp, DMACFG);
2124         }
2125 }
2126 
2127 static const struct ethtool_ops macb_ethtool_ops = {
2128         .get_settings           = macb_get_settings,
2129         .set_settings           = macb_set_settings,
2130         .get_regs_len           = macb_get_regs_len,
2131         .get_regs               = macb_get_regs,
2132         .get_link               = ethtool_op_get_link,
2133         .get_ts_info            = ethtool_op_get_ts_info,
2134 };
2135 
2136 static const struct ethtool_ops gem_ethtool_ops = {
2137         .get_settings           = macb_get_settings,
2138         .set_settings           = macb_set_settings,
2139         .get_regs_len           = macb_get_regs_len,
2140         .get_regs               = macb_get_regs,
2141         .get_link               = ethtool_op_get_link,
2142         .get_ts_info            = ethtool_op_get_ts_info,
2143         .get_ethtool_stats      = gem_get_ethtool_stats,
2144         .get_strings            = gem_get_ethtool_strings,
2145         .get_sset_count         = gem_get_sset_count,
2146 };
2147 
2148 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2149 {
2150         struct macb *bp = netdev_priv(dev);
2151         struct phy_device *phydev = bp->phy_dev;
2152 
2153         if (!netif_running(dev))
2154                 return -EINVAL;
2155 
2156         if (!phydev)
2157                 return -ENODEV;
2158 
2159         return phy_mii_ioctl(phydev, rq, cmd);
2160 }
2161 
2162 static int macb_set_features(struct net_device *netdev,
2163                              netdev_features_t features)
2164 {
2165         struct macb *bp = netdev_priv(netdev);
2166         netdev_features_t changed = features ^ netdev->features;
2167 
2168         /* TX checksum offload */
2169         if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2170                 u32 dmacfg;
2171 
2172                 dmacfg = gem_readl(bp, DMACFG);
2173                 if (features & NETIF_F_HW_CSUM)
2174                         dmacfg |= GEM_BIT(TXCOEN);
2175                 else
2176                         dmacfg &= ~GEM_BIT(TXCOEN);
2177                 gem_writel(bp, DMACFG, dmacfg);
2178         }
2179 
2180         /* RX checksum offload */
2181         if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2182                 u32 netcfg;
2183 
2184                 netcfg = gem_readl(bp, NCFGR);
2185                 if (features & NETIF_F_RXCSUM &&
2186                     !(netdev->flags & IFF_PROMISC))
2187                         netcfg |= GEM_BIT(RXCOEN);
2188                 else
2189                         netcfg &= ~GEM_BIT(RXCOEN);
2190                 gem_writel(bp, NCFGR, netcfg);
2191         }
2192 
2193         return 0;
2194 }
2195 
2196 static const struct net_device_ops macb_netdev_ops = {
2197         .ndo_open               = macb_open,
2198         .ndo_stop               = macb_close,
2199         .ndo_start_xmit         = macb_start_xmit,
2200         .ndo_set_rx_mode        = macb_set_rx_mode,
2201         .ndo_get_stats          = macb_get_stats,
2202         .ndo_do_ioctl           = macb_ioctl,
2203         .ndo_validate_addr      = eth_validate_addr,
2204         .ndo_change_mtu         = macb_change_mtu,
2205         .ndo_set_mac_address    = eth_mac_addr,
2206 #ifdef CONFIG_NET_POLL_CONTROLLER
2207         .ndo_poll_controller    = macb_poll_controller,
2208 #endif
2209         .ndo_set_features       = macb_set_features,
2210 };
2211 
2212 /*
2213  * Configure peripheral capabilities according to device tree
2214  * and integration options used
2215  */
2216 static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
2217 {
2218         u32 dcfg;
2219 
2220         if (dt_conf)
2221                 bp->caps = dt_conf->caps;
2222 
2223         if (hw_is_gem(bp->regs, bp->native_io)) {
2224                 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2225 
2226                 dcfg = gem_readl(bp, DCFG1);
2227                 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2228                         bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2229                 dcfg = gem_readl(bp, DCFG2);
2230                 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2231                         bp->caps |= MACB_CAPS_FIFO_MODE;
2232         }
2233 
2234         dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2235 }
2236 
2237 static void macb_probe_queues(void __iomem *mem,
2238                               bool native_io,
2239                               unsigned int *queue_mask,
2240                               unsigned int *num_queues)
2241 {
2242         unsigned int hw_q;
2243 
2244         *queue_mask = 0x1;
2245         *num_queues = 1;
2246 
2247         /* Is this MACB or GEM?
2248          *
2249          * We need to read directly from the hardware here because
2250          * we are early in the probe process and the
2251          * MACB_CAPS_MACB_IS_GEM flag is not set yet.
2252          */
2253         if (!hw_is_gem(mem, native_io))
2254                 return;
2255 
2256         /* bit 0 is never set but queue 0 always exists */
2257         *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
2258 
2259         *queue_mask |= 0x1;
2260 
2261         for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2262                 if (*queue_mask & (1 << hw_q))
2263                         (*num_queues)++;
2264 }
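
/*
 * Example (illustrative, with a hypothetical register value): a GEM whose
 * DCFG6 low byte reads 0x0e reports hardware queues 1-3; after bit 0 is
 * forced on, queue_mask is 0x0f and num_queues is 4 (queue 0 plus
 * queues 1-3).
 */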
2265 
2266 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
2267                          struct clk **hclk, struct clk **tx_clk)
2268 {
2269         int err;
2270 
2271         *pclk = devm_clk_get(&pdev->dev, "pclk");
2272         if (IS_ERR(*pclk)) {
2273                 err = PTR_ERR(*pclk);
2274                 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
2275                 return err;
2276         }
2277 
2278         *hclk = devm_clk_get(&pdev->dev, "hclk");
2279         if (IS_ERR(*hclk)) {
2280                 err = PTR_ERR(*hclk);
2281                 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
2282                 return err;
2283         }
2284 
2285         *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2286         if (IS_ERR(*tx_clk))
2287                 *tx_clk = NULL;
2288 
2289         err = clk_prepare_enable(*pclk);
2290         if (err) {
2291                 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
2292                 return err;
2293         }
2294 
2295         err = clk_prepare_enable(*hclk);
2296         if (err) {
2297                 dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
2298                 goto err_disable_pclk;
2299         }
2300 
2301         err = clk_prepare_enable(*tx_clk);
2302         if (err) {
2303                 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
2304                 goto err_disable_hclk;
2305         }
2306 
2307         return 0;
2308 
2309 err_disable_hclk:
2310         clk_disable_unprepare(*hclk);
2311 
2312 err_disable_pclk:
2313         clk_disable_unprepare(*pclk);
2314 
2315         return err;
2316 }
2317 
2318 static int macb_init(struct platform_device *pdev)
2319 {
2320         struct net_device *dev = platform_get_drvdata(pdev);
2321         unsigned int hw_q, q;
2322         struct macb *bp = netdev_priv(dev);
2323         struct macb_queue *queue;
2324         int err;
2325         u32 val;
2326 
2327         /* Set the queue register mapping once and for all: queue0 has a
2328          * special register mapping, but we don't want to test the queue index
2329          * and then compute the corresponding register offset at run time.
2330          */
2331         for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
2332                 if (!(bp->queue_mask & (1 << hw_q)))
2333                         continue;
2334 
2335                 queue = &bp->queues[q];
2336                 queue->bp = bp;
2337                 if (hw_q) {
2338                         queue->ISR  = GEM_ISR(hw_q - 1);
2339                         queue->IER  = GEM_IER(hw_q - 1);
2340                         queue->IDR  = GEM_IDR(hw_q - 1);
2341                         queue->IMR  = GEM_IMR(hw_q - 1);
2342                         queue->TBQP = GEM_TBQP(hw_q - 1);
2343                 } else {
2344                         /* queue0 uses legacy registers */
2345                         queue->ISR  = MACB_ISR;
2346                         queue->IER  = MACB_IER;
2347                         queue->IDR  = MACB_IDR;
2348                         queue->IMR  = MACB_IMR;
2349                         queue->TBQP = MACB_TBQP;
2350                 }
2351 
2352                 /* Get the IRQ: here we use the Linux queue index, not the
2353                  * hardware queue index.  The queue IRQ definitions in the
2354                  * device tree must be listed without the optional gaps
2355                  * that can exist in the hardware queue mask.
2356                  */
2357                 queue->irq = platform_get_irq(pdev, q);
2358                 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
2359                                        IRQF_SHARED, dev->name, queue);
2360                 if (err) {
2361                         dev_err(&pdev->dev,
2362                                 "Unable to request IRQ %d (error %d)\n",
2363                                 queue->irq, err);
2364                         return err;
2365                 }
2366 
2367                 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
2368                 q++;
2369         }
2370 
2371         dev->netdev_ops = &macb_netdev_ops;
2372         netif_napi_add(dev, &bp->napi, macb_poll, 64);
2373 
2374         /* set up the appropriate routines according to the adapter type */
2375         if (macb_is_gem(bp)) {
2376                 bp->max_tx_length = GEM_MAX_TX_LEN;
2377                 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2378                 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2379                 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2380                 bp->macbgem_ops.mog_rx = gem_rx;
2381                 dev->ethtool_ops = &gem_ethtool_ops;
2382         } else {
2383                 bp->max_tx_length = MACB_MAX_TX_LEN;
2384                 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2385                 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2386                 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2387                 bp->macbgem_ops.mog_rx = macb_rx;
2388                 dev->ethtool_ops = &macb_ethtool_ops;
2389         }
2390 
2391         /* Set features */
2392         dev->hw_features = NETIF_F_SG;
2393         /* Checksum offload is only available on gem with packet buffer */
2394         if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
2395                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2396         if (bp->caps & MACB_CAPS_SG_DISABLED)
2397                 dev->hw_features &= ~NETIF_F_SG;
2398         dev->features = dev->hw_features;
2399 
2400         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
2401                 val = 0;
2402                 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2403                         val = GEM_BIT(RGMII);
2404                 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
2405                          (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
2406                         val = MACB_BIT(RMII);
2407                 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
2408                         val = MACB_BIT(MII);
2409 
2410                 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
2411                         val |= MACB_BIT(CLKEN);
2412 
2413                 macb_or_gem_writel(bp, USRIO, val);
2414         }
2415 
2416         /* Set MII management clock divider */
2417         val = macb_mdc_clk_div(bp);
2418         val |= macb_dbw(bp);
2419         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2420                 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2421         macb_writel(bp, NCFGR, val);
2422 
2423         return 0;
2424 }
2425 
2426 #if defined(CONFIG_OF)
2427 /* 1518 rounded up */
2428 #define AT91ETHER_MAX_RBUFF_SZ  0x600
2429 /* max number of receive buffers */
2430 #define AT91ETHER_MAX_RX_DESCR  9
2431 
2432 /* Initialize and start the Receiver and Transmit subsystems */
2433 static int at91ether_start(struct net_device *dev)
2434 {
2435         struct macb *lp = netdev_priv(dev);
2436         dma_addr_t addr;
2437         u32 ctl;
2438         int i;
2439 
2440         lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2441                                          (AT91ETHER_MAX_RX_DESCR *
2442                                           sizeof(struct macb_dma_desc)),
2443                                          &lp->rx_ring_dma, GFP_KERNEL);
2444         if (!lp->rx_ring)
2445                 return -ENOMEM;
2446 
2447         lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
2448                                             AT91ETHER_MAX_RX_DESCR *
2449                                             AT91ETHER_MAX_RBUFF_SZ,
2450                                             &lp->rx_buffers_dma, GFP_KERNEL);
2451         if (!lp->rx_buffers) {
2452                 dma_free_coherent(&lp->pdev->dev,
2453                                   AT91ETHER_MAX_RX_DESCR *
2454                                   sizeof(struct macb_dma_desc),
2455                                   lp->rx_ring, lp->rx_ring_dma);
2456                 lp->rx_ring = NULL;
2457                 return -ENOMEM;
2458         }
2459 
2460         addr = lp->rx_buffers_dma;
2461         for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2462                 lp->rx_ring[i].addr = addr;
2463                 lp->rx_ring[i].ctrl = 0;
2464                 addr += AT91ETHER_MAX_RBUFF_SZ;
2465         }
2466 
2467         /* Set the Wrap bit on the last descriptor */
2468         lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
2469 
2470         /* Reset buffer index */
2471         lp->rx_tail = 0;
2472 
2473         /* Program address of descriptor list in Rx Buffer Queue register */
2474         macb_writel(lp, RBQP, lp->rx_ring_dma);
2475 
2476         /* Enable Receive and Transmit */
2477         ctl = macb_readl(lp, NCR);
2478         macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
2479 
2480         return 0;
2481 }
2482 
2483 /* Open the ethernet interface */
2484 static int at91ether_open(struct net_device *dev)
2485 {
2486         struct macb *lp = netdev_priv(dev);
2487         u32 ctl;
2488         int ret;
2489 
2490         /* Clear internal statistics */
2491         ctl = macb_readl(lp, NCR);
2492         macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2493 
2494         macb_set_hwaddr(lp);
2495 
2496         ret = at91ether_start(dev);
2497         if (ret)
2498                 return ret;
2499 
2500         /* Enable MAC interrupts */
2501         macb_writel(lp, IER, MACB_BIT(RCOMP)    |
2502                              MACB_BIT(RXUBR)    |
2503                              MACB_BIT(ISR_TUND) |
2504                              MACB_BIT(ISR_RLE)  |
2505                              MACB_BIT(TCOMP)    |
2506                              MACB_BIT(ISR_ROVR) |
2507                              MACB_BIT(HRESP));
2508 
2509         /* schedule a link state check */
2510         phy_start(lp->phy_dev);
2511 
2512         netif_start_queue(dev);
2513 
2514         return 0;
2515 }
2516 
2517 /* Close the interface */
2518 static int at91ether_close(struct net_device *dev)
2519 {
2520         struct macb *lp = netdev_priv(dev);
2521         u32 ctl;
2522 
2523         /* Disable Receiver and Transmitter */
2524         ctl = macb_readl(lp, NCR);
2525         macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
2526 
2527         /* Disable MAC interrupts */
2528         macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
2529                              MACB_BIT(RXUBR)    |
2530                              MACB_BIT(ISR_TUND) |
2531                              MACB_BIT(ISR_RLE)  |
2532                              MACB_BIT(TCOMP)    |
2533                              MACB_BIT(ISR_ROVR) |
2534                              MACB_BIT(HRESP));
2535 
2536         netif_stop_queue(dev);
2537 
2538         dma_free_coherent(&lp->pdev->dev,
2539                           AT91ETHER_MAX_RX_DESCR *
2540                           sizeof(struct macb_dma_desc),
2541                           lp->rx_ring, lp->rx_ring_dma);
2542         lp->rx_ring = NULL;
2543 
2544         dma_free_coherent(&lp->pdev->dev,
2545                           AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
2546                           lp->rx_buffers, lp->rx_buffers_dma);
2547         lp->rx_buffers = NULL;
2548 
2549         return 0;
2550 }
2551 
2552 /* Transmit packet */
2553 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2554 {
2555         struct macb *lp = netdev_priv(dev);
2556 
2557         if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
2558                 netif_stop_queue(dev);
2559 
2560                 /* Store packet information (to free when Tx completed) */
2561                 lp->skb = skb;
2562                 lp->skb_length = skb->len;
2563                 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
2564                                                         DMA_TO_DEVICE);
2565 
2566                 /* Set address of the data in the Transmit Address register */
2567                 macb_writel(lp, TAR, lp->skb_physaddr);
2568                 /* Set length of the packet in the Transmit Control register */
2569                 macb_writel(lp, TCR, skb->len);
2570 
2571         } else {
2572                 netdev_err(dev, "%s called, but device is busy!\n", __func__);
2573                 return NETDEV_TX_BUSY;
2574         }
2575 
2576         return NETDEV_TX_OK;
2577 }
2578 
2579 /* Extract received frames from the buffer descriptors and send them to
2580  * the upper layers.  (Called from interrupt context.)
2581  */
2582 static void at91ether_rx(struct net_device *dev)
2583 {
2584         struct macb *lp = netdev_priv(dev);
2585         unsigned char *p_recv;
2586         struct sk_buff *skb;
2587         unsigned int pktlen;
2588 
2589         while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
2590                 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2591                 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
2592                 skb = netdev_alloc_skb(dev, pktlen + 2);
2593                 if (skb) {
2594                         skb_reserve(skb, 2);
2595                         memcpy(skb_put(skb, pktlen), p_recv, pktlen);
2596 
2597                         skb->protocol = eth_type_trans(skb, dev);
2598                         lp->stats.rx_packets++;
2599                         lp->stats.rx_bytes += pktlen;
2600                         netif_rx(skb);
2601                 } else {
2602                         lp->stats.rx_dropped++;
2603                 }
2604 
2605                 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
2606                         lp->stats.multicast++;
2607 
2608                 /* reset ownership bit */
2609                 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
2610 
2611                 /* wrap after last buffer */
2612                 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2613                         lp->rx_tail = 0;
2614                 else
2615                         lp->rx_tail++;
2616         }
2617 }
2618 
2619 /* MAC interrupt handler */
2620 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
2621 {
2622         struct net_device *dev = dev_id;
2623         struct macb *lp = netdev_priv(dev);
2624         u32 intstatus, ctl;
2625 
2626         /* MAC Interrupt Status register indicates what interrupts are pending.
2627          * It is automatically cleared once read.
2628          */
2629         intstatus = macb_readl(lp, ISR);
2630 
2631         /* Receive complete */
2632         if (intstatus & MACB_BIT(RCOMP))
2633                 at91ether_rx(dev);
2634 
2635         /* Transmit complete */
2636         if (intstatus & MACB_BIT(TCOMP)) {
2637                 /* The TCOM bit is set even if the transmission failed */
2638                 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
2639                         lp->stats.tx_errors++;
2640 
2641                 if (lp->skb) {
2642                         dev_kfree_skb_irq(lp->skb);
2643                         lp->skb = NULL;
2644                         dma_unmap_single(NULL, lp->skb_physaddr,
2645                                          lp->skb_length, DMA_TO_DEVICE);
2646                         lp->stats.tx_packets++;
2647                         lp->stats.tx_bytes += lp->skb_length;
2648                 }
2649                 netif_wake_queue(dev);
2650         }
2651 
2652         /* Work-around for EMAC Errata section 41.3.1 */
2653         if (intstatus & MACB_BIT(RXUBR)) {
2654                 ctl = macb_readl(lp, NCR);
2655                 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
2656                 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
2657         }
2658 
2659         if (intstatus & MACB_BIT(ISR_ROVR))
2660                 netdev_err(dev, "ROVR error\n");
2661 
2662         return IRQ_HANDLED;
2663 }
2664 
2665 #ifdef CONFIG_NET_POLL_CONTROLLER
2666 static void at91ether_poll_controller(struct net_device *dev)
2667 {
2668         unsigned long flags;
2669 
2670         local_irq_save(flags);
2671         at91ether_interrupt(dev->irq, dev);
2672         local_irq_restore(flags);
2673 }
2674 #endif
2675 
2676 static const struct net_device_ops at91ether_netdev_ops = {
2677         .ndo_open               = at91ether_open,
2678         .ndo_stop               = at91ether_close,
2679         .ndo_start_xmit         = at91ether_start_xmit,
2680         .ndo_get_stats          = macb_get_stats,
2681         .ndo_set_rx_mode        = macb_set_rx_mode,
2682         .ndo_set_mac_address    = eth_mac_addr,
2683         .ndo_do_ioctl           = macb_ioctl,
2684         .ndo_validate_addr      = eth_validate_addr,
2685         .ndo_change_mtu         = eth_change_mtu,
2686 #ifdef CONFIG_NET_POLL_CONTROLLER
2687         .ndo_poll_controller    = at91ether_poll_controller,
2688 #endif
2689 };
2690 
2691 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
2692                               struct clk **hclk, struct clk **tx_clk)
2693 {
2694         int err;
2695 
2696         *hclk = NULL;
2697         *tx_clk = NULL;
2698 
2699         *pclk = devm_clk_get(&pdev->dev, "ether_clk");
2700         if (IS_ERR(*pclk))
2701                 return PTR_ERR(*pclk);
2702 
2703         err = clk_prepare_enable(*pclk);
2704         if (err) {
2705                 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
2706                 return err;
2707         }
2708 
2709         return 0;
2710 }
2711 
2712 static int at91ether_init(struct platform_device *pdev)
2713 {
2714         struct net_device *dev = platform_get_drvdata(pdev);
2715         struct macb *bp = netdev_priv(dev);
2716         int err;
2717         u32 reg;
2718 
2719         dev->netdev_ops = &at91ether_netdev_ops;
2720         dev->ethtool_ops = &macb_ethtool_ops;
2721 
2722         err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
2723                                0, dev->name, dev);
2724         if (err)
2725                 return err;
2726 
2727         macb_writel(bp, NCR, 0);
2728 
2729         reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
2730         if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
2731                 reg |= MACB_BIT(RM9200_RMII);
2732 
2733         macb_writel(bp, NCFGR, reg);
2734 
2735         return 0;
2736 }
2737 
2738 static const struct macb_config at91sam9260_config = {
2739         .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
2740         .clk_init = macb_clk_init,
2741         .init = macb_init,
2742 };
2743 
2744 static const struct macb_config pc302gem_config = {
2745         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2746         .dma_burst_length = 16,
2747         .clk_init = macb_clk_init,
2748         .init = macb_init,
2749 };
2750 
2751 static const struct macb_config sama5d2_config = {
2752         .caps = 0,
2753         .dma_burst_length = 16,
2754         .clk_init = macb_clk_init,
2755         .init = macb_init,
2756 };
2757 
2758 static const struct macb_config sama5d3_config = {
2759         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2760         .dma_burst_length = 16,
2761         .clk_init = macb_clk_init,
2762         .init = macb_init,
2763 };
2764 
2765 static const struct macb_config sama5d4_config = {
2766         .caps = 0,
2767         .dma_burst_length = 4,
2768         .clk_init = macb_clk_init,
2769         .init = macb_init,
2770 };
2771 
2772 static const struct macb_config emac_config = {
2773         .clk_init = at91ether_clk_init,
2774         .init = at91ether_init,
2775 };
2776 
2777 static const struct macb_config np4_config = {
2778         .caps = MACB_CAPS_USRIO_DISABLED,
2779         .clk_init = macb_clk_init,
2780         .init = macb_init,
2781 };
2782 
2783 static const struct macb_config zynqmp_config = {
2784         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
2785         .dma_burst_length = 16,
2786         .clk_init = macb_clk_init,
2787         .init = macb_init,
2788         .jumbo_max_len = 10240,
2789 };
2790 
2791 static const struct macb_config zynq_config = {
2792         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
2793         .dma_burst_length = 16,
2794         .clk_init = macb_clk_init,
2795         .init = macb_init,
2796 };
2797 
2798 static const struct of_device_id macb_dt_ids[] = {
2799         { .compatible = "cdns,at32ap7000-macb" },
2800         { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
2801         { .compatible = "cdns,macb" },
2802         { .compatible = "cdns,np4-macb", .data = &np4_config },
2803         { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
2804         { .compatible = "cdns,gem", .data = &pc302gem_config },
2805         { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
2806         { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
2807         { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
2808         { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
2809         { .compatible = "cdns,emac", .data = &emac_config },
2810         { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
2811         { .compatible = "cdns,zynq-gem", .data = &zynq_config },
2812         { /* sentinel */ }
2813 };
2814 MODULE_DEVICE_TABLE(of, macb_dt_ids);
2815 #endif /* CONFIG_OF */
2816 
2817 static int macb_probe(struct platform_device *pdev)
2818 {
2819         int (*clk_init)(struct platform_device *, struct clk **,
2820                         struct clk **, struct clk **)
2821                                               = macb_clk_init;
2822         int (*init)(struct platform_device *) = macb_init;
2823         struct device_node *np = pdev->dev.of_node;
2824         struct device_node *phy_node;
2825         const struct macb_config *macb_config = NULL;
2826         struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
2827         unsigned int queue_mask, num_queues;
2828         struct macb_platform_data *pdata;
2829         bool native_io;
2830         struct phy_device *phydev;
2831         struct net_device *dev;
2832         struct resource *regs;
2833         void __iomem *mem;
2834         const char *mac;
2835         struct macb *bp;
2836         int err;
2837 
2838         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2839         mem = devm_ioremap_resource(&pdev->dev, regs);
2840         if (IS_ERR(mem))
2841                 return PTR_ERR(mem);
2842 
2843         if (np) {
2844                 const struct of_device_id *match;
2845 
2846                 match = of_match_node(macb_dt_ids, np);
2847                 if (match && match->data) {
2848                         macb_config = match->data;
2849                         clk_init = macb_config->clk_init;
2850                         init = macb_config->init;
2851                 }
2852         }
2853 
2854         err = clk_init(pdev, &pclk, &hclk, &tx_clk);
2855         if (err)
2856                 return err;
2857 
2858         native_io = hw_is_native_io(mem);
2859 
2860         macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
2861         dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2862         if (!dev) {
2863                 err = -ENOMEM;
2864                 goto err_disable_clocks;
2865         }
2866 
2867         dev->base_addr = regs->start;
2868 
2869         SET_NETDEV_DEV(dev, &pdev->dev);
2870 
2871         bp = netdev_priv(dev);
2872         bp->pdev = pdev;
2873         bp->dev = dev;
2874         bp->regs = mem;
2875         bp->native_io = native_io;
2876         if (native_io) {
2877                 bp->macb_reg_readl = hw_readl_native;
2878                 bp->macb_reg_writel = hw_writel_native;
2879         } else {
2880                 bp->macb_reg_readl = hw_readl;
2881                 bp->macb_reg_writel = hw_writel;
2882         }
2883         bp->num_queues = num_queues;
2884         bp->queue_mask = queue_mask;
2885         if (macb_config)
2886                 bp->dma_burst_length = macb_config->dma_burst_length;
2887         bp->pclk = pclk;
2888         bp->hclk = hclk;
2889         bp->tx_clk = tx_clk;
2890         if (macb_config)
2891                 bp->jumbo_max_len = macb_config->jumbo_max_len;
2892 
2893         spin_lock_init(&bp->lock);
2894 
2895         /* setup capabilities */
2896         macb_configure_caps(bp, macb_config);
2897 
2898         platform_set_drvdata(pdev, dev);
2899 
2900         dev->irq = platform_get_irq(pdev, 0);
2901         if (dev->irq < 0) {
2902                 err = dev->irq;
2903                 goto err_disable_clocks;
2904         }
2905 
2906         mac = of_get_mac_address(np);
2907         if (mac)
2908                 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
2909         else
2910                 macb_get_hwaddr(bp);
2911 
2912         /* Power up the PHY if there is a GPIO reset */
2913         phy_node = of_get_next_available_child(np, NULL);
2914         if (phy_node) {
2915                 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
2916                 if (gpio_is_valid(gpio))
2917                         bp->reset_gpio = gpio_to_desc(gpio);
2918                 gpiod_set_value(bp->reset_gpio, GPIOD_OUT_HIGH);
2919         }
2920         of_node_put(phy_node);
2921 
        err = of_get_phy_mode(np);
        if (err < 0) {
                pdata = dev_get_platdata(&pdev->dev);
                if (pdata && pdata->is_rmii)
                        bp->phy_interface = PHY_INTERFACE_MODE_RMII;
                else
                        bp->phy_interface = PHY_INTERFACE_MODE_MII;
        } else {
                bp->phy_interface = err;
        }

        /* IP specific init */
        err = init(pdev);
        if (err)
                goto err_out_free_netdev;

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
                /* not yet registered: unregister_netdev() would be wrong here */
                goto err_out_free_netdev;
        }

        err = macb_mii_init(bp);
        if (err)
                goto err_out_unregister_netdev;

        netif_carrier_off(dev);

        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
                    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
                    dev->base_addr, dev->irq, dev->dev_addr);

        phydev = bp->phy_dev;
        phy_attached_info(phydev);

        return 0;

err_out_unregister_netdev:
        unregister_netdev(dev);

err_out_free_netdev:
        free_netdev(dev);

err_disable_clocks:
        clk_disable_unprepare(tx_clk);
        clk_disable_unprepare(hclk);
        clk_disable_unprepare(pclk);

        return err;
}

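/*
 * Undo macb_probe(): disconnect the PHY and tear down the MDIO bus,
 * deassert the optional PHY reset line, then unregister the interface,
 * release the clocks and free the net_device.
 */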
static int macb_remove(struct platform_device *pdev)
{
        struct net_device *dev;
        struct macb *bp;

        dev = platform_get_drvdata(pdev);

        if (dev) {
                bp = netdev_priv(dev);
                if (bp->phy_dev)
                        phy_disconnect(bp->phy_dev);
                mdiobus_unregister(bp->mii_bus);
                mdiobus_free(bp->mii_bus);

                /* Shutdown the PHY if there is a GPIO reset; pass a plain
                 * 0, since the GPIOD_OUT_LOW request flag is nonzero and
                 * would actually assert the line
                 */
                if (bp->reset_gpio)
                        gpiod_set_value(bp->reset_gpio, 0);

                unregister_netdev(dev);
                clk_disable_unprepare(bp->tx_clk);
                clk_disable_unprepare(bp->hclk);
                clk_disable_unprepare(bp->pclk);
                free_netdev(dev);
        }

        return 0;
}

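/*
 * System sleep hooks: suspend detaches the interface from the stack and
 * gates all controller clocks; resume ungates them and reattaches.
 */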
static int __maybe_unused macb_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct macb *bp = netdev_priv(netdev);

        netif_carrier_off(netdev);
        netif_device_detach(netdev);

        clk_disable_unprepare(bp->tx_clk);
        clk_disable_unprepare(bp->hclk);
        clk_disable_unprepare(bp->pclk);

        return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct macb *bp = netdev_priv(netdev);

        clk_prepare_enable(bp->pclk);
        clk_prepare_enable(bp->hclk);
        clk_prepare_enable(bp->tx_clk);

        netif_device_attach(netdev);

        return 0;
}

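/*
 * SIMPLE_DEV_PM_OPS only wires up the callbacks when CONFIG_PM_SLEEP is
 * enabled, which is why they are annotated __maybe_unused above.
 */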
static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
        .probe          = macb_probe,
        .remove         = macb_remove,
        .driver         = {
                .name           = "macb",
                .of_match_table = of_match_ptr(macb_dt_ids),
                .pm             = &macb_pm_ops,
        },
};

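/*
 * Illustrative device tree node for this driver (a sketch, not taken
 * from this file): the compatible string selects an entry in macb_dt_ids,
 * the clock names match what the default macb_clk_init() requests
 * ("pclk", "hclk", optional "tx_clk"), and "reset-gpios" on the PHY
 * child node is what the probe code above looks up.  Exact compatibles,
 * clock providers and GPIO phandles depend on the SoC binding:
 *
 *      macb0: ethernet@f0028000 {
 *              compatible = "cdns,macb";
 *              reg = <0xf0028000 0x100>;
 *              interrupts = <34>;
 *              clock-names = "hclk", "pclk";
 *              clocks = <&mck>, <&mck>;
 *              phy-mode = "rmii";
 *              #address-cells = <1>;
 *              #size-cells = <0>;
 *
 *              ethernet-phy@1 {
 *                      reg = <0x1>;
 *                      reset-gpios = <&pioE 6 GPIO_ACTIVE_LOW>;
 *              };
 *      };
 */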
module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");
