
Linux/drivers/net/ethernet/cadence/macb.c

/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE     128
#define RX_BUFFER_MULTIPLE      64  /* bytes */

#define DEFAULT_RX_RING_SIZE    512 /* must be power of 2 */
#define MIN_RX_RING_SIZE        64
#define MAX_RX_RING_SIZE        8192
#define RX_RING_BYTES(bp)       (macb_dma_desc_get_size(bp)     \
                                 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE    512 /* must be power of 2 */
#define MIN_TX_RING_SIZE        64
#define MAX_TX_RING_SIZE        4096
#define TX_RING_BYTES(bp)       (macb_dma_desc_get_size(bp)     \
                                 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)       (3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS       (MACB_BIT(RCOMP) | MACB_BIT(RXUBR)      \
                                 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS       (MACB_BIT(ISR_TUND)                     \
                                        | MACB_BIT(ISR_RLE)             \
                                        | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS       (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN       8
#define MACB_MAX_TX_LEN         ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN          ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE        ETH_MIN_MTU
#define MACB_NETIF_LSO          (NETIF_F_TSO | NETIF_F_UFO)

#define MACB_WOL_HAS_MAGIC_PACKET       (0x1 << 0)
#define MACB_WOL_ENABLED                (0x1 << 1)
/* Graceful stop timeout in us. We should allow up to 1 frame time
 * (10 Mbit/s, full-duplex, ignoring collisions): 1538 bytes (max frame
 * plus preamble/SFD and IFG) * 8 bits / 10 Mbit/s ~= 1230 us.
 */
#define MACB_HALT_TIMEOUT       1230

/* The DMA buffer descriptor size may differ depending on the
 * hardware configuration.
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        if (bp->hw_dma_cap == HW_DMA_CAP_64B)
                return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
#endif
        return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        /* The DMA buffer descriptor is 4 words long (instead of 2 words)
         * for 64-bit GEM, so the ring index must be doubled.
         */
        if (bp->hw_dma_cap == HW_DMA_CAP_64B)
                idx <<= 1;
#endif
        return idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
        return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
        return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
                                          unsigned int index)
{
        index = macb_tx_ring_wrap(queue->bp, index);
        index = macb_adj_dma_desc_idx(queue->bp, index);
        return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
                                       unsigned int index)
{
        return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
        dma_addr_t offset;

        offset = macb_tx_ring_wrap(queue->bp, index) *
                        macb_dma_desc_get_size(queue->bp);

        return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
        return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
        index = macb_rx_ring_wrap(bp, index);
        index = macb_adj_dma_desc_idx(bp, index);
        return &bp->rx_ring[index];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
        return bp->rx_buffers + bp->rx_buffer_size *
               macb_rx_ring_wrap(bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
        return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
        __raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
        return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
        writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register.
 * When the CPU is big endian, we need to program swapped mode for
 * management descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
        u32 value = MACB_BIT(LLB);

        __raw_writel(value, addr + MACB_NCR);
        value = __raw_readl(addr + MACB_NCR);

        /* Write 0 back to disable everything */
        __raw_writel(0, addr + MACB_NCR);

        return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
        u32 id;

        if (native_io)
                id = __raw_readl(addr + MACB_MID);
        else
                id = readl_relaxed(addr + MACB_MID);

        return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
        u32 bottom;
        u16 top;

        bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
        macb_or_gem_writel(bp, SA1B, bottom);
        top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
        macb_or_gem_writel(bp, SA1T, top);

        /* Clear unused address register sets */
        macb_or_gem_writel(bp, SA2B, 0);
        macb_or_gem_writel(bp, SA2T, 0);
        macb_or_gem_writel(bp, SA3B, 0);
        macb_or_gem_writel(bp, SA3T, 0);
        macb_or_gem_writel(bp, SA4B, 0);
        macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
        struct macb_platform_data *pdata;
        u32 bottom;
        u16 top;
        u8 addr[6];
        int i;

        pdata = dev_get_platdata(&bp->pdev->dev);

        /* Check all 4 address registers for a valid address */
        for (i = 0; i < 4; i++) {
                bottom = macb_or_gem_readl(bp, SA1B + i * 8);
                top = macb_or_gem_readl(bp, SA1T + i * 8);

                if (pdata && pdata->rev_eth_addr) {
                        addr[5] = bottom & 0xff;
                        addr[4] = (bottom >> 8) & 0xff;
                        addr[3] = (bottom >> 16) & 0xff;
                        addr[2] = (bottom >> 24) & 0xff;
                        addr[1] = top & 0xff;
                        addr[0] = (top & 0xff00) >> 8;
                } else {
                        addr[0] = bottom & 0xff;
                        addr[1] = (bottom >> 8) & 0xff;
                        addr[2] = (bottom >> 16) & 0xff;
                        addr[3] = (bottom >> 24) & 0xff;
                        addr[4] = top & 0xff;
                        addr[5] = (top >> 8) & 0xff;
                }

                if (is_valid_ether_addr(addr)) {
                        memcpy(bp->dev->dev_addr, addr, sizeof(addr));
                        return;
                }
        }

        dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
        eth_hw_addr_random(bp->dev);
}

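/* Read a PHY register by writing a Clause 22 management frame to the
 * MAN register, then polling the IDLE bit in NSR until the shift
 * operation on the MDIO bus completes.
 */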
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
        struct macb *bp = bus->priv;
        int value;

        macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
                              | MACB_BF(RW, MACB_MAN_READ)
                              | MACB_BF(PHYA, mii_id)
                              | MACB_BF(REGA, regnum)
                              | MACB_BF(CODE, MACB_MAN_CODE)));

        /* wait for end of transfer */
        while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
                cpu_relax();

        value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

        return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                           u16 value)
{
        struct macb *bp = bus->priv;

        macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
                              | MACB_BF(RW, MACB_MAN_WRITE)
                              | MACB_BF(PHYA, mii_id)
                              | MACB_BF(REGA, regnum)
                              | MACB_BF(CODE, MACB_MAN_CODE)
                              | MACB_BF(DATA, value)));

        /* wait for end of transfer */
        while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
                cpu_relax();

        return 0;
}

/**
 * macb_set_tx_clk() - Set the TX clock to match a new link speed
 * @clk:    Pointer to the clock to change
 * @speed:  New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 * @dev:    Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
        long ferr, rate, rate_rounded;

        if (!clk)
                return;

        switch (speed) {
        case SPEED_10:
                rate = 2500000;
                break;
        case SPEED_100:
                rate = 25000000;
                break;
        case SPEED_1000:
                rate = 125000000;
                break;
        default:
                return;
        }

        rate_rounded = clk_round_rate(clk, rate);
        if (rate_rounded < 0)
                return;

        /* RGMII allows 50 ppm frequency error. Test and warn if this limit
         * is not satisfied.
         */
        ferr = abs(rate_rounded - rate);
        ferr = DIV_ROUND_UP(ferr, rate / 100000);
        if (ferr > 5)
                netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
                            rate);

        if (clk_set_rate(clk, rate_rounded))
                netdev_err(dev, "adjusting tx_clk failed.\n");
}

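/* adjust_link callback registered with phylib: mirror the PHY's
 * speed/duplex settings into NCFGR, retune the TX clock, and report
 * carrier changes to the network stack.
 */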
static void macb_handle_link_change(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        int status_change = 0;

        spin_lock_irqsave(&bp->lock, flags);

        if (phydev->link) {
                if ((bp->speed != phydev->speed) ||
                    (bp->duplex != phydev->duplex)) {
                        u32 reg;

                        reg = macb_readl(bp, NCFGR);
                        reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
                        if (macb_is_gem(bp))
                                reg &= ~GEM_BIT(GBE);

                        if (phydev->duplex)
                                reg |= MACB_BIT(FD);
                        if (phydev->speed == SPEED_100)
                                reg |= MACB_BIT(SPD);
                        if (phydev->speed == SPEED_1000 &&
                            bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
                                reg |= GEM_BIT(GBE);

                        macb_or_gem_writel(bp, NCFGR, reg);

                        bp->speed = phydev->speed;
                        bp->duplex = phydev->duplex;
                        status_change = 1;
                }
        }

        if (phydev->link != bp->link) {
                if (!phydev->link) {
                        bp->speed = 0;
                        bp->duplex = -1;
                }
                bp->link = phydev->link;

                status_change = 1;
        }

        spin_unlock_irqrestore(&bp->lock, flags);

        if (status_change) {
                if (phydev->link) {
                        /* Update the TX clock rate if and only if the link is
                         * up and there has been a link change.
                         */
                        macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

                        netif_carrier_on(dev);
                        netdev_info(dev, "link up (%d/%s)\n",
                                    phydev->speed,
                                    phydev->duplex == DUPLEX_FULL ?
                                    "Full" : "Half");
                } else {
                        netif_carrier_off(dev);
                        netdev_info(dev, "link down\n");
                }
        }
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        struct macb_platform_data *pdata;
        struct phy_device *phydev;
        int phy_irq;
        int ret;

        phydev = phy_find_first(bp->mii_bus);
        if (!phydev) {
                netdev_err(dev, "no PHY found\n");
                return -ENXIO;
        }

        pdata = dev_get_platdata(&bp->pdev->dev);
        if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
                ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
                                        "phy int");
                if (!ret) {
                        phy_irq = gpio_to_irq(pdata->phy_irq_pin);
                        phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
                }
        } else {
                phydev->irq = PHY_POLL;
        }

        /* attach the mac to the phy */
        ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
                                 bp->phy_interface);
        if (ret) {
                netdev_err(dev, "Could not attach to PHY\n");
                return ret;
        }

        /* mask with MAC supported features */
        if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
                phydev->supported &= PHY_GBIT_FEATURES;
        else
                phydev->supported &= PHY_BASIC_FEATURES;

        if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
                phydev->supported &= ~SUPPORTED_1000baseT_Half;

        phydev->advertising = phydev->supported;

        bp->link = 0;
        bp->speed = 0;
        bp->duplex = -1;

        return 0;
}

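/* Allocate and register the MDIO bus, using the device tree to find
 * PHYs when available and falling back to a bus scan otherwise, then
 * attach the first PHY found.
 */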
static int macb_mii_init(struct macb *bp)
{
        struct macb_platform_data *pdata;
        struct device_node *np;
        int err = -ENXIO, i;

        /* Enable management port */
        macb_writel(bp, NCR, MACB_BIT(MPE));

        bp->mii_bus = mdiobus_alloc();
        if (!bp->mii_bus) {
                err = -ENOMEM;
                goto err_out;
        }

        bp->mii_bus->name = "MACB_mii_bus";
        bp->mii_bus->read = &macb_mdio_read;
        bp->mii_bus->write = &macb_mdio_write;
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                 bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
        bp->mii_bus->parent = &bp->pdev->dev;
        pdata = dev_get_platdata(&bp->pdev->dev);

        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

        np = bp->pdev->dev.of_node;
        if (np) {
                /* try dt phy registration */
                err = of_mdiobus_register(bp->mii_bus, np);

                /* fall back to standard PHY registration if no PHY was
                 * found during DT PHY registration
                 */
                if (!err && !phy_find_first(bp->mii_bus)) {
                        for (i = 0; i < PHY_MAX_ADDR; i++) {
                                struct phy_device *phydev;

                                phydev = mdiobus_scan(bp->mii_bus, i);
                                if (IS_ERR(phydev) &&
                                    PTR_ERR(phydev) != -ENODEV) {
                                        err = PTR_ERR(phydev);
                                        break;
                                }
                        }

                        if (err)
                                goto err_out_unregister_bus;
                }
        } else {
                for (i = 0; i < PHY_MAX_ADDR; i++)
                        bp->mii_bus->irq[i] = PHY_POLL;

                if (pdata)
                        bp->mii_bus->phy_mask = pdata->phy_mask;

                err = mdiobus_register(bp->mii_bus);
        }

        if (err)
                goto err_out_free_mdiobus;

        err = macb_mii_probe(bp->dev);
        if (err)
                goto err_out_unregister_bus;

        return 0;

err_out_unregister_bus:
        mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
        mdiobus_free(bp->mii_bus);
err_out:
        return err;
}

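/* Accumulate the MACB hardware statistics registers (PFR..TPF) into
 * hw_stats; the WARN_ON checks that the struct layout still matches
 * the register order this loop depends on.
 */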
static void macb_update_stats(struct macb *bp)
{
        u32 *p = &bp->hw_stats.macb.rx_pause_frames;
        u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
        int offset = MACB_PFR;

        WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

        for (; p < end; p++, offset += 4)
                *p += bp->macb_reg_readl(bp, offset);
}

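/* Request a transmit halt (THALT) and poll the TGO bit in TSR until
 * the transmitter has actually stopped, or MACB_HALT_TIMEOUT expires.
 */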
static int macb_halt_tx(struct macb *bp)
{
        unsigned long   halt_time, timeout;
        u32             status;

        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

        timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
        do {
                halt_time = jiffies;
                status = macb_readl(bp, TSR);
                if (!(status & MACB_BIT(TGO)))
                        return 0;

                usleep_range(10, 250);
        } while (time_before(halt_time, timeout));

        return -ETIMEDOUT;
}

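/* Undo the DMA mapping for one TX buffer and free its skb, if this
 * entry holds the last buffer of a frame.
 */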
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
        if (tx_skb->mapping) {
                if (tx_skb->mapped_as_page)
                        dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
                                       tx_skb->size, DMA_TO_DEVICE);
                else
                        dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
                                         tx_skb->size, DMA_TO_DEVICE);
                tx_skb->mapping = 0;
        }

        if (tx_skb->skb) {
                dev_kfree_skb_any(tx_skb->skb);
                tx_skb->skb = NULL;
        }
}

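/* Write a buffer address into a descriptor, splitting it across the
 * base descriptor and the 64-bit extension when the hardware uses
 * 4-word descriptors.
 */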
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        struct macb_dma_desc_64 *desc_64;

        if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
                desc_64 = macb_64b_desc(bp, desc);
                desc_64->addrh = upper_32_bits(addr);
        }
#endif
        desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
        dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        struct macb_dma_desc_64 *desc_64;

        if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
                desc_64 = macb_64b_desc(bp, desc);
                addr = ((u64)(desc_64->addrh) << 32);
        }
#endif
        addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
        return addr;
}

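/* Work-queue handler scheduled on TX errors: halt the transmitter,
 * reap every frame left in the TX ring (completed or not), reset the
 * ring, and restart transmission.
 */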
static void macb_tx_error_task(struct work_struct *work)
{
        struct macb_queue       *queue = container_of(work, struct macb_queue,
                                                      tx_error_task);
        struct macb             *bp = queue->bp;
        struct macb_tx_skb      *tx_skb;
        struct macb_dma_desc    *desc;
        struct sk_buff          *skb;
        unsigned int            tail;
        unsigned long           flags;

        netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
                    (unsigned int)(queue - bp->queues),
                    queue->tx_tail, queue->tx_head);

        /* Prevent the queue IRQ handlers from running: each of them may call
         * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
         * As explained below, we have to halt the transmission before updating
         * TBQP registers so we call netif_tx_stop_all_queues() to notify the
         * network engine about the macb/gem being halted.
         */
        spin_lock_irqsave(&bp->lock, flags);

        /* Make sure nobody is trying to queue up new packets */
        netif_tx_stop_all_queues(bp->dev);

        /* Stop transmission now
         * (in case we have just queued new packets)
         * macb/gem must be halted to write TBQP register
         */
        if (macb_halt_tx(bp))
                /* Just complain for now, reinitializing TX path can be good */
                netdev_err(bp->dev, "BUG: halt tx timed out\n");

        /* Treat frames in the TX queue, including the ones that caused
         * the error. Free the transmit buffers in the upper layer.
         */
        for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
                u32     ctrl;

                desc = macb_tx_desc(queue, tail);
                ctrl = desc->ctrl;
                tx_skb = macb_tx_skb(queue, tail);
                skb = tx_skb->skb;

                if (ctrl & MACB_BIT(TX_USED)) {
                        /* skb is set for the last buffer of the frame */
                        while (!skb) {
                                macb_tx_unmap(bp, tx_skb);
                                tail++;
                                tx_skb = macb_tx_skb(queue, tail);
                                skb = tx_skb->skb;
                        }

                        /* ctrl still refers to the first buffer descriptor
                         * since it's the only one written back by the hardware
                         */
                        if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
                                netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
                                            macb_tx_ring_wrap(bp, tail),
                                            skb->data);
                                bp->stats.tx_packets++;
                                bp->stats.tx_bytes += skb->len;
                        }
                } else {
                        /* "Buffers exhausted mid-frame" errors may only happen
                         * if the driver is buggy, so complain loudly about
                         * those. Statistics are updated by hardware.
                         */
                        if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
                                netdev_err(bp->dev,
                                           "BUG: TX buffers exhausted mid-frame\n");

                        desc->ctrl = ctrl | MACB_BIT(TX_USED);
                }

                macb_tx_unmap(bp, tx_skb);
        }

        /* Set end of TX queue */
        desc = macb_tx_desc(queue, 0);
        macb_set_addr(bp, desc, 0);
        desc->ctrl = MACB_BIT(TX_USED);

        /* Make descriptor updates visible to hardware */
        wmb();

        /* Reinitialize the TX desc queue */
        queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        if (bp->hw_dma_cap == HW_DMA_CAP_64B)
                queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
        /* Make TX ring reflect state of hardware */
        queue->tx_head = 0;
        queue->tx_tail = 0;

        /* Housework before enabling TX IRQ */
        macb_writel(bp, TSR, macb_readl(bp, TSR));
        queue_writel(queue, IER, MACB_TX_INT_FLAGS);

        /* Now we are ready to start transmission again */
        netif_tx_start_all_queues(bp->dev);
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

        spin_unlock_irqrestore(&bp->lock, flags);
}

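/* TX completion handler: walk the ring from tx_tail to tx_head, free
 * the buffers of every frame the hardware has marked TX_USED, and wake
 * the subqueue once enough descriptors are available again.
 */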
static void macb_tx_interrupt(struct macb_queue *queue)
{
        unsigned int tail;
        unsigned int head;
        u32 status;
        struct macb *bp = queue->bp;
        u16 queue_index = queue - bp->queues;

        status = macb_readl(bp, TSR);
        macb_writel(bp, TSR, status);

        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                queue_writel(queue, ISR, MACB_BIT(TCOMP));

        netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
                    (unsigned long)status);

        head = queue->tx_head;
        for (tail = queue->tx_tail; tail != head; tail++) {
                struct macb_tx_skb      *tx_skb;
                struct sk_buff          *skb;
                struct macb_dma_desc    *desc;
                u32                     ctrl;

                desc = macb_tx_desc(queue, tail);

                /* Make hw descriptor updates visible to CPU */
                rmb();

                ctrl = desc->ctrl;

                /* TX_USED bit is only set by hardware on the very first buffer
                 * descriptor of the transmitted frame.
                 */
                if (!(ctrl & MACB_BIT(TX_USED)))
                        break;

                /* Process all buffers of the current transmitted frame */
                for (;; tail++) {
                        tx_skb = macb_tx_skb(queue, tail);
                        skb = tx_skb->skb;

                        /* First, update TX stats if needed */
                        if (skb) {
                                netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
                                            macb_tx_ring_wrap(bp, tail),
                                            skb->data);
                                bp->stats.tx_packets++;
                                bp->stats.tx_bytes += skb->len;
                        }

                        /* Now we can safely release resources */
                        macb_tx_unmap(bp, tx_skb);

                        /* skb is set only for the last buffer of the frame.
                         * WARNING: at this point skb has been freed by
                         * macb_tx_unmap().
                         */
                        if (skb)
                                break;
                }
        }

        queue->tx_tail = tail;
        if (__netif_subqueue_stopped(bp->dev, queue_index) &&
            CIRC_CNT(queue->tx_head, queue->tx_tail,
                     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
                netif_wake_subqueue(bp->dev, queue_index);
}

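/* Refill the GEM RX ring: allocate and DMA-map a fresh skb for every
 * free descriptor between rx_tail and rx_prepared_head, marking the
 * last ring entry with RX_WRAP.
 */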
static void gem_rx_refill(struct macb *bp)
{
        unsigned int            entry;
        struct sk_buff          *skb;
        dma_addr_t              paddr;
        struct macb_dma_desc *desc;

        while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
                          bp->rx_ring_size) > 0) {
                entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);

                /* Make hw descriptor updates visible to CPU */
                rmb();

                bp->rx_prepared_head++;
                desc = macb_rx_desc(bp, entry);

                if (!bp->rx_skbuff[entry]) {
                        /* allocate sk_buff for this free entry in ring */
                        skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
                        if (unlikely(!skb)) {
                                netdev_err(bp->dev,
                                           "Unable to allocate sk_buff\n");
                                break;
                        }

                        /* now fill corresponding descriptor entry */
                        paddr = dma_map_single(&bp->pdev->dev, skb->data,
                                               bp->rx_buffer_size,
                                               DMA_FROM_DEVICE);
                        if (dma_mapping_error(&bp->pdev->dev, paddr)) {
                                dev_kfree_skb(skb);
                                break;
                        }

                        bp->rx_skbuff[entry] = skb;

                        if (entry == bp->rx_ring_size - 1)
                                paddr |= MACB_BIT(RX_WRAP);
                        macb_set_addr(bp, desc, paddr);
                        desc->ctrl = 0;

                        /* properly align Ethernet header */
                        skb_reserve(skb, NET_IP_ALIGN);
                } else {
                        desc->addr &= ~MACB_BIT(RX_USED);
                        desc->ctrl = 0;
                }
        }

        /* Make descriptor updates visible to hardware */
        wmb();

        netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
                    bp->rx_prepared_head, bp->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
                                  unsigned int end)
{
        unsigned int frag;

        for (frag = begin; frag != end; frag++) {
                struct macb_dma_desc *desc = macb_rx_desc(bp, frag);

                desc->addr &= ~MACB_BIT(RX_USED);
        }

        /* Make descriptor updates visible to hardware */
        wmb();

        /* When this happens, the hardware stats registers for
         * whatever caused this are updated, so we don't have to record
         * anything.
         */
}

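/* GEM receive path: each packet fits in a single buffer (SOF and EOF
 * set on the same descriptor), so received skbs can be handed straight
 * to the stack without copying.
 */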
static int gem_rx(struct macb *bp, int budget)
{
        unsigned int            len;
        unsigned int            entry;
        struct sk_buff          *skb;
        struct macb_dma_desc    *desc;
        int                     count = 0;

        while (count < budget) {
                u32 ctrl;
                dma_addr_t addr;
                bool rxused;

                entry = macb_rx_ring_wrap(bp, bp->rx_tail);
                desc = macb_rx_desc(bp, entry);

                /* Make hw descriptor updates visible to CPU */
                rmb();

                rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
                addr = macb_get_addr(bp, desc);
                ctrl = desc->ctrl;

                if (!rxused)
                        break;

                bp->rx_tail++;
                count++;

                if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
                        netdev_err(bp->dev,
                                   "not whole frame pointed by descriptor\n");
                        bp->stats.rx_dropped++;
                        break;
                }
                skb = bp->rx_skbuff[entry];
                if (unlikely(!skb)) {
                        netdev_err(bp->dev,
                                   "inconsistent Rx descriptor chain\n");
                        bp->stats.rx_dropped++;
                        break;
                }
                /* now everything is ready for receiving packet */
                bp->rx_skbuff[entry] = NULL;
                len = ctrl & bp->rx_frm_len_mask;

                netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

                skb_put(skb, len);
                dma_unmap_single(&bp->pdev->dev, addr,
                                 bp->rx_buffer_size, DMA_FROM_DEVICE);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb_checksum_none_assert(skb);
                if (bp->dev->features & NETIF_F_RXCSUM &&
                    !(bp->dev->flags & IFF_PROMISC) &&
                    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                bp->stats.rx_packets++;
                bp->stats.rx_bytes += skb->len;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
                netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
                            skb->len, skb->csum);
                print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
                               skb_mac_header(skb), 16, true);
                print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
                               skb->data, 32, true);
#endif

                netif_receive_skb(skb);
        }

        gem_rx_refill(bp);

        return count;
}

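/* MACB receive path: a frame may span several small RX buffers (from
 * the SOF to the EOF descriptor), so the fragments are copied into a
 * freshly allocated linear skb.
 */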
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                         unsigned int last_frag)
{
        unsigned int len;
        unsigned int frag;
        unsigned int offset;
        struct sk_buff *skb;
        struct macb_dma_desc *desc;

        desc = macb_rx_desc(bp, last_frag);
        len = desc->ctrl & bp->rx_frm_len_mask;

        netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
                macb_rx_ring_wrap(bp, first_frag),
                macb_rx_ring_wrap(bp, last_frag), len);

        /* The ethernet header starts NET_IP_ALIGN bytes into the
         * first buffer. Since the header is 14 bytes, this makes the
         * payload word-aligned.
         *
         * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
         * the two padding bytes into the skb so that we avoid hitting
         * the slowpath in memcpy(), and pull them off afterwards.
         */
        skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
        if (!skb) {
                bp->stats.rx_dropped++;
                for (frag = first_frag; ; frag++) {
                        desc = macb_rx_desc(bp, frag);
                        desc->addr &= ~MACB_BIT(RX_USED);
                        if (frag == last_frag)
                                break;
                }

                /* Make descriptor updates visible to hardware */
                wmb();

                return 1;
        }

        offset = 0;
        len += NET_IP_ALIGN;
        skb_checksum_none_assert(skb);
        skb_put(skb, len);

        for (frag = first_frag; ; frag++) {
                unsigned int frag_len = bp->rx_buffer_size;

                if (offset + frag_len > len) {
                        if (unlikely(frag != last_frag)) {
                                dev_kfree_skb_any(skb);
                                return -1;
                        }
                        frag_len = len - offset;
                }
                skb_copy_to_linear_data_offset(skb, offset,
                                               macb_rx_buffer(bp, frag),
                                               frag_len);
                offset += bp->rx_buffer_size;
                desc = macb_rx_desc(bp, frag);
                desc->addr &= ~MACB_BIT(RX_USED);

                if (frag == last_frag)
                        break;
        }

        /* Make descriptor updates visible to hardware */
        wmb();

        __skb_pull(skb, NET_IP_ALIGN);
        skb->protocol = eth_type_trans(skb, bp->dev);

        bp->stats.rx_packets++;
        bp->stats.rx_bytes += skb->len;
        netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
                    skb->len, skb->csum);
        netif_receive_skb(skb);

        return 0;
}

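/* Point every RX descriptor at its slice of the contiguous RX buffer
 * area and mark the last descriptor with RX_WRAP.
 */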
static inline void macb_init_rx_ring(struct macb *bp)
{
        dma_addr_t addr;
        struct macb_dma_desc *desc = NULL;
        int i;

        addr = bp->rx_buffers_dma;
        for (i = 0; i < bp->rx_ring_size; i++) {
                desc = macb_rx_desc(bp, i);
                macb_set_addr(bp, desc, addr);
                desc->ctrl = 0;
                addr += bp->rx_buffer_size;
        }
        desc->addr |= MACB_BIT(RX_WRAP);
        bp->rx_tail = 0;
}

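/* Process up to 'budget' received frames, discarding partial ones;
 * on ring corruption, disable RX, reinitialize the ring, and re-enable
 * the receiver.
 */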
static int macb_rx(struct macb *bp, int budget)
{
        bool reset_rx_queue = false;
        int received = 0;
        unsigned int tail;
        int first_frag = -1;

        for (tail = bp->rx_tail; budget > 0; tail++) {
                struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
                u32 ctrl;

                /* Make hw descriptor updates visible to CPU */
                rmb();

                ctrl = desc->ctrl;

                if (!(desc->addr & MACB_BIT(RX_USED)))
                        break;

                if (ctrl & MACB_BIT(RX_SOF)) {
                        if (first_frag != -1)
                                discard_partial_frame(bp, first_frag, tail);
                        first_frag = tail;
                }

                if (ctrl & MACB_BIT(RX_EOF)) {
                        int dropped;

                        if (unlikely(first_frag == -1)) {
                                reset_rx_queue = true;
                                continue;
                        }

                        dropped = macb_rx_frame(bp, first_frag, tail);
                        first_frag = -1;
                        if (unlikely(dropped < 0)) {
                                reset_rx_queue = true;
                                continue;
                        }
                        if (!dropped) {
                                received++;
                                budget--;
                        }
                }
        }

        if (unlikely(reset_rx_queue)) {
                unsigned long flags;
                u32 ctrl;

                netdev_err(bp->dev, "RX queue corruption: reset it\n");

                spin_lock_irqsave(&bp->lock, flags);

                ctrl = macb_readl(bp, NCR);
                macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

                macb_init_rx_ring(bp);
                macb_writel(bp, RBQP, bp->rx_ring_dma);

                macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

                spin_unlock_irqrestore(&bp->lock, flags);
                return received;
        }

        if (first_frag != -1)
                bp->rx_tail = first_frag;
        else
                bp->rx_tail = tail;

        return received;
}

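/* NAPI poll callback: reap received packets with RX interrupts masked
 * and re-enable them once done, rescheduling if frames arrived in the
 * meantime.
 */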
static int macb_poll(struct napi_struct *napi, int budget)
{
        struct macb *bp = container_of(napi, struct macb, napi);
        int work_done;
        u32 status;

        status = macb_readl(bp, RSR);
        macb_writel(bp, RSR, status);

        work_done = 0;

        netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
                    (unsigned long)status, budget);

        work_done = bp->macbgem_ops.mog_rx(bp, budget);
        if (work_done < budget) {
                napi_complete(napi);

                /* Packets received while interrupts were disabled */
                status = macb_readl(bp, RSR);
                if (status) {
                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                                macb_writel(bp, ISR, MACB_BIT(RCOMP));
                        napi_reschedule(napi);
                } else {
                        macb_writel(bp, IER, MACB_RX_INT_FLAGS);
                }
        }

        /* TODO: Handle errors */

        return work_done;
}

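/* Per-queue interrupt handler: schedule NAPI for RX, complete TX or
 * defer TX error recovery to the work queue, and handle RX overruns,
 * "used bit read" stalls, and HRESP bus errors.
 */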
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
        struct macb_queue *queue = dev_id;
        struct macb *bp = queue->bp;
        struct net_device *dev = bp->dev;
        u32 status, ctrl;

        status = queue_readl(queue, ISR);

        if (unlikely(!status))
                return IRQ_NONE;

        spin_lock(&bp->lock);

        while (status) {
                /* close possible race with dev_close */
                if (unlikely(!netif_running(dev))) {
                        queue_writel(queue, IDR, -1);
                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                                queue_writel(queue, ISR, -1);
                        break;
                }

                netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
                            (unsigned int)(queue - bp->queues),
                            (unsigned long)status);

                if (status & MACB_RX_INT_FLAGS) {
                        /* There's no point taking any more interrupts
                         * until we have processed the buffers. The
                         * scheduling call may fail if the poll routine
                         * is already scheduled, so disable interrupts
                         * now.
                         */
                        queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                                queue_writel(queue, ISR, MACB_BIT(RCOMP));

                        if (napi_schedule_prep(&bp->napi)) {
                                netdev_vdbg(bp->dev, "scheduling RX softirq\n");
                                __napi_schedule(&bp->napi);
                        }
                }

                if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
                        queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
                        schedule_work(&queue->tx_error_task);

                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                                queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

                        break;
                }

                if (status & MACB_BIT(TCOMP))
                        macb_tx_interrupt(queue);

                /* Link change detection isn't possible with RMII, so we'll
                 * add that if/when we get our hands on a full-blown MII PHY.
                 */

                /* There is a hardware issue under heavy load where DMA can
                 * stop, this causes endless "used buffer descriptor read"
                 * interrupts but it can be cleared by re-enabling RX. See
                 * the at91 manual, section 41.3.1 or the Zynq manual
                 * section 16.7.4 for details.
                 */
                if (status & MACB_BIT(RXUBR)) {
                        ctrl = macb_readl(bp, NCR);
                        macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
                        wmb();
                        macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                                queue_writel(queue, ISR, MACB_BIT(RXUBR));
                }

                if (status & MACB_BIT(ISR_ROVR)) {
                        /* We missed at least one packet */
                        if (macb_is_gem(bp))
                                bp->hw_stats.gem.rx_overruns++;
                        else
                                bp->hw_stats.macb.rx_overruns++;

                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                                queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
                }

                if (status & MACB_BIT(HRESP)) {
                        /* TODO: Reset the hardware, and maybe move the
                         * netdev_err to a lower-priority context as well
                         * (work queue?)
                         */
                        netdev_err(dev, "DMA bus error: HRESP not OK\n");

                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                                queue_writel(queue, ISR, MACB_BIT(HRESP));
                }

                status = queue_readl(queue, ISR);
        }

        spin_unlock(&bp->lock);

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        struct macb_queue *queue;
        unsigned long flags;
        unsigned int q;

        local_irq_save(flags);
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
                macb_interrupt(dev->irq, queue);
        local_irq_restore(flags);
}
#endif

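/* Map the skb head and all paged fragments into TX DMA descriptors.
 * Descriptors are written back-to-front so that the TX_USED bit of the
 * first descriptor is cleared last, only once the whole frame is
 * visible to the hardware.
 */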
static unsigned int macb_tx_map(struct macb *bp,
                                struct macb_queue *queue,
                                struct sk_buff *skb,
                                unsigned int hdrlen)
{
        dma_addr_t mapping;
        unsigned int len, entry, i, tx_head = queue->tx_head;
        struct macb_tx_skb *tx_skb = NULL;
        struct macb_dma_desc *desc;
        unsigned int offset, size, count = 0;
        unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int eof = 1, mss_mfs = 0;
        u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

        /* LSO */
        if (skb_shinfo(skb)->gso_size != 0) {
                if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        /* UDP - UFO */
                        lso_ctrl = MACB_LSO_UFO_ENABLE;
                else
                        /* TCP - TSO */
                        lso_ctrl = MACB_LSO_TSO_ENABLE;
        }

        /* First, map non-paged data */
        len = skb_headlen(skb);

        /* first buffer length */
        size = hdrlen;

        offset = 0;
        while (len) {
                entry = macb_tx_ring_wrap(bp, tx_head);
                tx_skb = &queue->tx_skb[entry];

                mapping = dma_map_single(&bp->pdev->dev,
                                         skb->data + offset,
                                         size, DMA_TO_DEVICE);
                if (dma_mapping_error(&bp->pdev->dev, mapping))
                        goto dma_error;

                /* Save info to properly release resources */
                tx_skb->skb = NULL;
                tx_skb->mapping = mapping;
                tx_skb->size = size;
                tx_skb->mapped_as_page = false;

                len -= size;
                offset += size;
                count++;
                tx_head++;

                size = min(len, bp->max_tx_length);
        }

        /* Then, map paged data from fragments */
        for (f = 0; f < nr_frags; f++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                len = skb_frag_size(frag);
                offset = 0;
                while (len) {
                        size = min(len, bp->max_tx_length);
                        entry = macb_tx_ring_wrap(bp, tx_head);
                        tx_skb = &queue->tx_skb[entry];

                        mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
                                                   offset, size, DMA_TO_DEVICE);
                        if (dma_mapping_error(&bp->pdev->dev, mapping))
                                goto dma_error;

                        /* Save info to properly release resources */
                        tx_skb->skb = NULL;
                        tx_skb->mapping = mapping;
                        tx_skb->size = size;
                        tx_skb->mapped_as_page = true;

                        len -= size;
                        offset += size;
                        count++;
                        tx_head++;
                }
        }

        /* Should never happen */
        if (unlikely(!tx_skb)) {
                netdev_err(bp->dev, "BUG! empty skb!\n");
                return 0;
        }

        /* This is the last buffer of the frame: save socket buffer */
        tx_skb->skb = skb;

        /* Update TX ring: update buffer descriptors in reverse order
         * to avoid race condition
         */

        /* Set 'TX_USED' bit in buffer descriptor at tx_head position
         * to set the end of TX queue
         */
        i = tx_head;
        entry = macb_tx_ring_wrap(bp, i);
        ctrl = MACB_BIT(TX_USED);
        desc = macb_tx_desc(queue, entry);
        desc->ctrl = ctrl;

        if (lso_ctrl) {
                if (lso_ctrl == MACB_LSO_UFO_ENABLE)
                        /* include header and FCS in value given to h/w */
                        mss_mfs = skb_shinfo(skb)->gso_size +
                                        skb_transport_offset(skb) +
                                        ETH_FCS_LEN;
                else /* TSO */ {
                        mss_mfs = skb_shinfo(skb)->gso_size;
                        /* TCP Sequence Number Source Select
                         * can be set only for TSO
                         */
                        seq_ctrl = 0;
                }
        }

        do {
                i--;
                entry = macb_tx_ring_wrap(bp, i);
                tx_skb = &queue->tx_skb[entry];
                desc = macb_tx_desc(queue, entry);

                ctrl = (u32)tx_skb->size;
                if (eof) {
                        ctrl |= MACB_BIT(TX_LAST);
                        eof = 0;
                }
                if (unlikely(entry == (bp->tx_ring_size - 1)))
                        ctrl |= MACB_BIT(TX_WRAP);

                /* First descriptor is header descriptor */
                if (i == queue->tx_head) {
                        ctrl |= MACB_BF(TX_LSO, lso_ctrl);
                        ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
                } else
                        /* Only set MSS/MFS on payload descriptors
                         * (second or later descriptor)
                         */
                        ctrl |= MACB_BF(MSS_MFS, mss_mfs);

                /* Set TX buffer descriptor */
                macb_set_addr(bp, desc, tx_skb->mapping);
                /* desc->addr must be visible to hardware before clearing
                 * 'TX_USED' bit in desc->ctrl.
                 */
                wmb();
                desc->ctrl = ctrl;
        } while (i != queue->tx_head);

        queue->tx_head = tx_head;

        return count;

dma_error:
        netdev_err(bp->dev, "TX DMA map failed\n");

        for (i = queue->tx_head; i != tx_head; i++) {
                tx_skb = macb_tx_skb(queue, i);

                macb_tx_unmap(bp, tx_skb);
        }

        return 0;
}

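/* ndo_features_check callback: drop the LSO features for skbs whose
 * payload buffers (apart from the last) are not a multiple of 8 bytes,
 * since the controller's LSO implementation requires that alignment.
 */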
1462 static netdev_features_t macb_features_check(struct sk_buff *skb,
1463                                              struct net_device *dev,
1464                                              netdev_features_t features)
1465 {
1466         unsigned int nr_frags, f;
1467         unsigned int hdrlen;
1468 
1469         /* Validate LSO compatibility */
1470 
1471         /* there is only one buffer */
1472         if (!skb_is_nonlinear(skb))
1473                 return features;
1474 
1475         /* length of header */
1476         hdrlen = skb_transport_offset(skb);
1477         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1478                 hdrlen += tcp_hdrlen(skb);
1479 
1480         /* For LSO:
1481          * When software supplies two or more payload buffers, all payload buffers
1482          * apart from the last must be a multiple of 8 bytes in size.
1483          */
1484         if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
1485                 return features & ~MACB_NETIF_LSO;
1486 
1487         nr_frags = skb_shinfo(skb)->nr_frags;
1488         /* No need to check last fragment */
1489         nr_frags--;
1490         for (f = 0; f < nr_frags; f++) {
1491                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1492 
1493                 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
1494                         return features & ~MACB_NETIF_LSO;
1495         }
1496         return features;
1497 }
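
     /* Example (illustration only): a fragmented skb whose linear part holds
      * the TCP headers plus 100 bytes of payload fails the IS_ALIGNED() test
      * above (100 is not a multiple of 8), so TSO/UFO is dropped from its
      * features and the stack segments it in software instead.
      */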
1498 
1499 static inline int macb_clear_csum(struct sk_buff *skb)
1500 {
1501         /* no change for packets without checksum offloading */
1502         if (skb->ip_summed != CHECKSUM_PARTIAL)
1503                 return 0;
1504 
1505         /* make sure we can modify the header */
1506         if (unlikely(skb_cow_head(skb, 0)))
1507                 return -1;
1508 
1509         /* Initialize the checksum field.
1510          * This is required at least on Zynq, which otherwise computes wrong
1511          * UDP header checksums for UDP packets with a UDP data length <= 2.
1512          */
1513         *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1514         return 0;
1515 }
1516 
1517 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1518 {
1519         u16 queue_index = skb_get_queue_mapping(skb);
1520         struct macb *bp = netdev_priv(dev);
1521         struct macb_queue *queue = &bp->queues[queue_index];
1522         unsigned long flags;
1523         unsigned int desc_cnt, nr_frags, frag_size, f;
1524         unsigned int hdrlen;
1525         bool is_lso, is_udp = false;
1526 
1527         is_lso = (skb_shinfo(skb)->gso_size != 0);
1528 
1529         if (is_lso) {
1530                 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
1531 
1532                 /* length of headers */
1533                 if (is_udp)
1534                         /* only queue eth + ip headers separately for UDP */
1535                         hdrlen = skb_transport_offset(skb);
1536                 else
1537                         hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
1538                 if (skb_headlen(skb) < hdrlen) {
1539                         netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
1540                         /* supporting this would require copying the headers into a single buffer */
1541                         return NETDEV_TX_BUSY;
1542                 }
1543         } else
1544                 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
1545 
1546 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1547         netdev_vdbg(bp->dev,
1548                     "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1549                     queue_index, skb->len, skb->head, skb->data,
1550                     skb_tail_pointer(skb), skb_end_pointer(skb));
1551         print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1552                        skb->data, 16, true);
1553 #endif
1554 
1555         /* Count how many TX buffer descriptors are needed to send this
1556          * socket buffer: skb fragments of jumbo frames may need to be
1557          * split into many buffer descriptors.
1558          */
1559         if (is_lso && (skb_headlen(skb) > hdrlen))
1560                 /* extra header descriptor if also payload in first buffer */
1561                 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
1562         else
1563                 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1564         nr_frags = skb_shinfo(skb)->nr_frags;
1565         for (f = 0; f < nr_frags; f++) {
1566                 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1567                 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1568         }
1569 
1570         spin_lock_irqsave(&bp->lock, flags);
1571 
1572         /* This is a hard error, log it. */
1573         if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
1574                        bp->tx_ring_size) < desc_cnt) {
1575                 netif_stop_subqueue(dev, queue_index);
1576                 spin_unlock_irqrestore(&bp->lock, flags);
1577                 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1578                            queue->tx_head, queue->tx_tail);
1579                 return NETDEV_TX_BUSY;
1580         }
1581 
1582         if (macb_clear_csum(skb)) {
1583                 dev_kfree_skb_any(skb);
1584                 goto unlock;
1585         }
1586 
1587         /* Map socket buffer for DMA transfer */
1588         if (!macb_tx_map(bp, queue, skb, hdrlen)) {
1589                 dev_kfree_skb_any(skb);
1590                 goto unlock;
1591         }
1592 
1593         /* Make newly initialized descriptor visible to hardware */
1594         wmb();
1595 
1596         skb_tx_timestamp(skb);
1597 
1598         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1599 
1600         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
1601                 netif_stop_subqueue(dev, queue_index);
1602 
1603 unlock:
1604         spin_unlock_irqrestore(&bp->lock, flags);
1605 
1606         return NETDEV_TX_OK;
1607 }
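
     /* Worked example for the CIRC_SPACE() check in macb_start_xmit() above
      * (illustration only): with tx_ring_size = 512, tx_head = 10 and
      * tx_tail = 5, CIRC_SPACE() yields (5 - 10 - 1) & 511 = 506 free
      * entries, so a frame needing up to 506 descriptors can still be
      * queued before the subqueue is stopped.
      */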
1608 
1609 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1610 {
1611         if (!macb_is_gem(bp)) {
1612                 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1613         } else {
1614                 bp->rx_buffer_size = size;
1615 
1616                 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1617                         netdev_dbg(bp->dev,
1618                                    "RX buffer must be multiple of %d bytes, expanding\n",
1619                                    RX_BUFFER_MULTIPLE);
1620                         bp->rx_buffer_size =
1621                                 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1622                 }
1623         }
1624 
1625         netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1626                    bp->dev->mtu, bp->rx_buffer_size);
1627 }
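
     /* Example (illustration only): with the default MTU of 1500, macb_open()
      * requests 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + NET_IP_ALIGN(2) = 1520
      * bytes, which GEM rounds up to 1536, the next multiple of
      * RX_BUFFER_MULTIPLE; non-GEM MACB always uses the fixed 128-byte size.
      */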
1628 
1629 static void gem_free_rx_buffers(struct macb *bp)
1630 {
1631         struct sk_buff          *skb;
1632         struct macb_dma_desc    *desc;
1633         dma_addr_t              addr;
1634         int i;
1635 
1636         if (!bp->rx_skbuff)
1637                 return;
1638 
1639         for (i = 0; i < bp->rx_ring_size; i++) {
1640                 skb = bp->rx_skbuff[i];
1641 
1642                 if (!skb)
1643                         continue;
1644 
1645                 desc = macb_rx_desc(bp, i);
1646                 addr = macb_get_addr(bp, desc);
1647 
1648                 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1649                                  DMA_FROM_DEVICE);
1650                 dev_kfree_skb_any(skb);
1651                 skb = NULL;
1652         }
1653 
1654         kfree(bp->rx_skbuff);
1655         bp->rx_skbuff = NULL;
1656 }
1657 
1658 static void macb_free_rx_buffers(struct macb *bp)
1659 {
1660         if (bp->rx_buffers) {
1661                 dma_free_coherent(&bp->pdev->dev,
1662                                   bp->rx_ring_size * bp->rx_buffer_size,
1663                                   bp->rx_buffers, bp->rx_buffers_dma);
1664                 bp->rx_buffers = NULL;
1665         }
1666 }
1667 
1668 static void macb_free_consistent(struct macb *bp)
1669 {
1670         struct macb_queue *queue;
1671         unsigned int q;
1672 
1673         bp->macbgem_ops.mog_free_rx_buffers(bp);
1674         if (bp->rx_ring) {
1675                 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
1676                                   bp->rx_ring, bp->rx_ring_dma);
1677                 bp->rx_ring = NULL;
1678         }
1679 
1680         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1681                 kfree(queue->tx_skb);
1682                 queue->tx_skb = NULL;
1683                 if (queue->tx_ring) {
1684                         dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
1685                                           queue->tx_ring, queue->tx_ring_dma);
1686                         queue->tx_ring = NULL;
1687                 }
1688         }
1689 }
1690 
1691 static int gem_alloc_rx_buffers(struct macb *bp)
1692 {
1693         int size;
1694 
1695         size = bp->rx_ring_size * sizeof(struct sk_buff *);
1696         bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1697         if (!bp->rx_skbuff)
1698                 return -ENOMEM;
1699         else
1700                 netdev_dbg(bp->dev,
1701                            "Allocated %d RX struct sk_buff entries at %p\n",
1702                            bp->rx_ring_size, bp->rx_skbuff);
1703         return 0;
1704 }
1705 
1706 static int macb_alloc_rx_buffers(struct macb *bp)
1707 {
1708         int size;
1709 
1710         size = bp->rx_ring_size * bp->rx_buffer_size;
1711         bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1712                                             &bp->rx_buffers_dma, GFP_KERNEL);
1713         if (!bp->rx_buffers)
1714                 return -ENOMEM;
1715 
1716         netdev_dbg(bp->dev,
1717                    "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1718                    size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1719         return 0;
1720 }
1721 
1722 static int macb_alloc_consistent(struct macb *bp)
1723 {
1724         struct macb_queue *queue;
1725         unsigned int q;
1726         int size;
1727 
1728         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1729                 size = TX_RING_BYTES(bp);
1730                 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1731                                                     &queue->tx_ring_dma,
1732                                                     GFP_KERNEL);
1733                 if (!queue->tx_ring)
1734                         goto out_err;
1735                 netdev_dbg(bp->dev,
1736                            "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1737                            q, size, (unsigned long)queue->tx_ring_dma,
1738                            queue->tx_ring);
1739 
1740                 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
1741                 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1742                 if (!queue->tx_skb)
1743                         goto out_err;
1744         }
1745 
1746         size = RX_RING_BYTES(bp);
1747         bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1748                                          &bp->rx_ring_dma, GFP_KERNEL);
1749         if (!bp->rx_ring)
1750                 goto out_err;
1751         netdev_dbg(bp->dev,
1752                    "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1753                    size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1754 
1755         if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1756                 goto out_err;
1757 
1758         return 0;
1759 
1760 out_err:
1761         macb_free_consistent(bp);
1762         return -ENOMEM;
1763 }
1764 
1765 static void gem_init_rings(struct macb *bp)
1766 {
1767         struct macb_queue *queue;
1768         struct macb_dma_desc *desc = NULL;
1769         unsigned int q;
1770         int i;
1771 
1772         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1773                 for (i = 0; i < bp->tx_ring_size; i++) {
1774                         desc = macb_tx_desc(queue, i);
1775                         macb_set_addr(bp, desc, 0);
1776                         desc->ctrl = MACB_BIT(TX_USED);
1777                 }
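                     /* desc still points at the last ring entry: the wrap bit lands there */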
1778                 desc->ctrl |= MACB_BIT(TX_WRAP);
1779                 queue->tx_head = 0;
1780                 queue->tx_tail = 0;
1781         }
1782 
1783         bp->rx_tail = 0;
1784         bp->rx_prepared_head = 0;
1785 
1786         gem_rx_refill(bp);
1787 }
1788 
1789 static void macb_init_rings(struct macb *bp)
1790 {
1791         int i;
1792         struct macb_dma_desc *desc = NULL;
1793 
1794         macb_init_rx_ring(bp);
1795 
1796         for (i = 0; i < bp->tx_ring_size; i++) {
1797                 desc = macb_tx_desc(&bp->queues[0], i);
1798                 macb_set_addr(bp, desc, 0);
1799                 desc->ctrl = MACB_BIT(TX_USED);
1800         }
1801         bp->queues[0].tx_head = 0;
1802         bp->queues[0].tx_tail = 0;
1803         desc->ctrl |= MACB_BIT(TX_WRAP);
1804 }
1805 
1806 static void macb_reset_hw(struct macb *bp)
1807 {
1808         struct macb_queue *queue;
1809         unsigned int q;
1810 
1811         /* Disable RX and TX (XXX: Should we halt the transmission
1812          * more gracefully?)
1813          */
1814         macb_writel(bp, NCR, 0);
1815 
1816         /* Clear the stats registers (XXX: Update stats first?) */
1817         macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1818 
1819         /* Clear all status flags */
1820         macb_writel(bp, TSR, -1);
1821         macb_writel(bp, RSR, -1);
1822 
1823         /* Disable all interrupts */
1824         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1825                 queue_writel(queue, IDR, -1);
1826                 queue_readl(queue, ISR);
1827                 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1828                         queue_writel(queue, ISR, -1);
1829         }
1830 }
1831 
1832 static u32 gem_mdc_clk_div(struct macb *bp)
1833 {
1834         u32 config;
1835         unsigned long pclk_hz = clk_get_rate(bp->pclk);
1836 
1837         if (pclk_hz <= 20000000)
1838                 config = GEM_BF(CLK, GEM_CLK_DIV8);
1839         else if (pclk_hz <= 40000000)
1840                 config = GEM_BF(CLK, GEM_CLK_DIV16);
1841         else if (pclk_hz <= 80000000)
1842                 config = GEM_BF(CLK, GEM_CLK_DIV32);
1843         else if (pclk_hz <= 120000000)
1844                 config = GEM_BF(CLK, GEM_CLK_DIV48);
1845         else if (pclk_hz <= 160000000)
1846                 config = GEM_BF(CLK, GEM_CLK_DIV64);
1847         else
1848                 config = GEM_BF(CLK, GEM_CLK_DIV96);
1849 
1850         return config;
1851 }
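
     /* Example (illustration only): a 133 MHz pclk falls into the <= 160 MHz
      * bracket above, so MDC runs at 133 MHz / 64 ~= 2.08 MHz, within the
      * 2.5 MHz maximum that IEEE 802.3 specifies for the MDIO bus.
      */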
1852 
1853 static u32 macb_mdc_clk_div(struct macb *bp)
1854 {
1855         u32 config;
1856         unsigned long pclk_hz;
1857 
1858         if (macb_is_gem(bp))
1859                 return gem_mdc_clk_div(bp);
1860 
1861         pclk_hz = clk_get_rate(bp->pclk);
1862         if (pclk_hz <= 20000000)
1863                 config = MACB_BF(CLK, MACB_CLK_DIV8);
1864         else if (pclk_hz <= 40000000)
1865                 config = MACB_BF(CLK, MACB_CLK_DIV16);
1866         else if (pclk_hz <= 80000000)
1867                 config = MACB_BF(CLK, MACB_CLK_DIV32);
1868         else
1869                 config = MACB_BF(CLK, MACB_CLK_DIV64);
1870 
1871         return config;
1872 }
1873 
1874 /* Get the DMA bus width field of the network configuration register that we
1875  * should program.  We find the width from decoding the design configuration
1876  * register to find the maximum supported data bus width.
1877  */
1878 static u32 macb_dbw(struct macb *bp)
1879 {
1880         if (!macb_is_gem(bp))
1881                 return 0;
1882 
1883         switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1884         case 4:
1885                 return GEM_BF(DBW, GEM_DBW128);
1886         case 2:
1887                 return GEM_BF(DBW, GEM_DBW64);
1888         case 1:
1889         default:
1890                 return GEM_BF(DBW, GEM_DBW32);
1891         }
1892 }
1893 
1894 /* Configure the receive DMA engine
1895  * - use the correct receive buffer size
1896  *   (if not supported by the FIFO, it falls back to the default)
1897  *   (if not supported by FIFO, it will fallback to default)
1898  * - set both rx/tx packet buffers to full memory size
1899  * These are configurable parameters for GEM.
1900  */
1901 static void macb_configure_dma(struct macb *bp)
1902 {
1903         u32 dmacfg;
1904 
1905         if (macb_is_gem(bp)) {
1906                 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1907                 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1908                 if (bp->dma_burst_length)
1909                         dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
1910                 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1911                 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1912 
1913                 if (bp->native_io)
1914                         dmacfg &= ~GEM_BIT(ENDIA_DESC);
1915                 else
1916                         dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1917 
1918                 if (bp->dev->features & NETIF_F_HW_CSUM)
1919                         dmacfg |= GEM_BIT(TXCOEN);
1920                 else
1921                         dmacfg &= ~GEM_BIT(TXCOEN);
1922 
1923 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1924                 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1925                         dmacfg |= GEM_BIT(ADDR64);
1926 #endif
1927                 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1928                            dmacfg);
1929                 gem_writel(bp, DMACFG, dmacfg);
1930         }
1931 }
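
     /* Example (illustration only): a 1536-byte RX buffer programs
      * RXBS = 1536 / RX_BUFFER_MULTIPLE = 24 buffer-size units into DMACFG.
      */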
1932 
1933 static void macb_init_hw(struct macb *bp)
1934 {
1935         struct macb_queue *queue;
1936         unsigned int q;
1937 
1938         u32 config;
1939 
1940         macb_reset_hw(bp);
1941         macb_set_hwaddr(bp);
1942 
1943         config = macb_mdc_clk_div(bp);
1944         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1945                 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
1946         config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
1947         config |= MACB_BIT(PAE);                /* PAuse Enable */
1948         config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
1949         if (bp->caps & MACB_CAPS_JUMBO)
1950                 config |= MACB_BIT(JFRAME);     /* Enable jumbo frames */
1951         else
1952                 config |= MACB_BIT(BIG);        /* Receive oversized frames */
1953         if (bp->dev->flags & IFF_PROMISC)
1954                 config |= MACB_BIT(CAF);        /* Copy All Frames */
1955         else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1956                 config |= GEM_BIT(RXCOEN);
1957         if (!(bp->dev->flags & IFF_BROADCAST))
1958                 config |= MACB_BIT(NBC);        /* No BroadCast */
1959         config |= macb_dbw(bp);
1960         macb_writel(bp, NCFGR, config);
1961         if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
1962                 gem_writel(bp, JML, bp->jumbo_max_len);
1963         bp->speed = SPEED_10;
1964         bp->duplex = DUPLEX_HALF;
1965         bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
1966         if (bp->caps & MACB_CAPS_JUMBO)
1967                 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
1968 
1969         macb_configure_dma(bp);
1970 
1971         /* Initialize TX and RX buffer queue base addresses */
1972         macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
1973 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1974         if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1975                 macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
1976 #endif
1977         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1978                 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1979 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1980                 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1981                         queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1982 #endif
1983 
1984                 /* Enable interrupts */
1985                 queue_writel(queue, IER,
1986                              MACB_RX_INT_FLAGS |
1987                              MACB_TX_INT_FLAGS |
1988                              MACB_BIT(HRESP));
1989         }
1990 
1991         /* Enable TX and RX */
1992         macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1993 }
1994 
1995 /* The hash address register is 64 bits long and takes up two
1996  * locations in the memory map.  The least significant bits are stored
1997  * in EMAC_HSL and the most significant bits in EMAC_HSH.
1998  *
1999  * The unicast hash enable and the multicast hash enable bits in the
2000  * network configuration register enable the reception of hash matched
2001  * frames. The destination address is reduced to a 6 bit index into
2002  * the 64 bit hash register using the following hash function.  The
2003  * hash function is an exclusive or of every sixth bit of the
2004  * destination address.
2005  *
2006  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
2007  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
2008  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
2009  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
2010  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
2011  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
2012  *
2013  * da[0] represents the least significant bit of the first byte
2014  * received, that is, the multicast/unicast indicator, and da[47]
2015  * represents the most significant bit of the last byte received.  If
2016  * the hash index, hi[n], points to a bit that is set in the hash
2017  * register then the frame will be matched according to whether the
2018  * frame is multicast or unicast.  A multicast match will be signalled
2019  * if the multicast hash enable bit is set, da[0] is 1 and the hash
2020  * index points to a bit set in the hash register.  A unicast match
2021  * will be signalled if the unicast hash enable bit is set, da[0] is 0
2022  * and the hash index points to a bit set in the hash register.  To
2023  * receive all multicast frames, the hash register should be set with
2024  * all ones and the multicast hash enable bit should be set in the
2025  * network configuration register.
2026  */
2027 
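     /* Worked example (illustration only): for the broadcast address
      * ff:ff:ff:ff:ff:ff every da[n] is 1, so each hi[n] XORs eight ones
      * and evaluates to 0; the frame is matched against bit 0 of the hash
      * register.
      */
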
2028 static inline int hash_bit_value(int bitnr, __u8 *addr)
2029 {
2030         if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2031                 return 1;
2032         return 0;
2033 }
2034 
2035 /* Return the hash index value for the specified address. */
2036 static int hash_get_index(__u8 *addr)
2037 {
2038         int i, j, bitval;
2039         int hash_index = 0;
2040 
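             /* hi[j] = XOR of address bits j, j+6, ..., j+42 (see the table above) */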
2041         for (j = 0; j < 6; j++) {
2042                 for (i = 0, bitval = 0; i < 8; i++)
2043                         bitval ^= hash_bit_value(i * 6 + j, addr);
2044 
2045                 hash_index |= (bitval << j);
2046         }
2047 
2048         return hash_index;
2049 }
2050 
2051 /* Add multicast addresses to the internal multicast-hash table. */
2052 static void macb_sethashtable(struct net_device *dev)
2053 {
2054         struct netdev_hw_addr *ha;
2055         unsigned long mc_filter[2];
2056         unsigned int bitnr;
2057         struct macb *bp = netdev_priv(dev);
2058 
2059         mc_filter[0] = 0;
2060         mc_filter[1] = 0;
2061 
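             /* bitnr runs 0..63: bits 0-31 select HRB (mc_filter[0]),
              * bits 32-63 select HRT (mc_filter[1])
              */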
2062         netdev_for_each_mc_addr(ha, dev) {
2063                 bitnr = hash_get_index(ha->addr);
2064                 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2065         }
2066 
2067         macb_or_gem_writel(bp, HRB, mc_filter[0]);
2068         macb_or_gem_writel(bp, HRT, mc_filter[1]);
2069 }
2070 
2071 /* Enable/Disable promiscuous and multicast modes. */
2072 static void macb_set_rx_mode(struct net_device *dev)
2073 {
2074         unsigned long cfg;
2075         struct macb *bp = netdev_priv(dev);
2076 
2077         cfg = macb_readl(bp, NCFGR);
2078 
2079         if (dev->flags & IFF_PROMISC) {
2080                 /* Enable promiscuous mode */
2081                 cfg |= MACB_BIT(CAF);
2082 
2083                 /* Disable RX checksum offload */
2084                 if (macb_is_gem(bp))
2085                         cfg &= ~GEM_BIT(RXCOEN);
2086         } else {
2087                 /* Disable promiscuous mode */
2088                 cfg &= ~MACB_BIT(CAF);
2089 
2090                 /* Enable RX checksum offload only if requested */
2091                 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2092                         cfg |= GEM_BIT(RXCOEN);
2093         }
2094 
2095         if (dev->flags & IFF_ALLMULTI) {
2096                 /* Enable all multicast mode */
2097                 macb_or_gem_writel(bp, HRB, -1);
2098                 macb_or_gem_writel(bp, HRT, -1);
2099                 cfg |= MACB_BIT(NCFGR_MTI);
2100         } else if (!netdev_mc_empty(dev)) {
2101                 /* Enable specific multicasts */
2102                 macb_sethashtable(dev);
2103                 cfg |= MACB_BIT(NCFGR_MTI);
2104         } else if (dev->flags & (~IFF_ALLMULTI)) {
2105                 /* Disable all multicast mode */
2106                 macb_or_gem_writel(bp, HRB, 0);
2107                 macb_or_gem_writel(bp, HRT, 0);
2108                 cfg &= ~MACB_BIT(NCFGR_MTI);
2109         }
2110 
2111         macb_writel(bp, NCFGR, cfg);
2112 }
2113 
2114 static int macb_open(struct net_device *dev)
2115 {
2116         struct macb *bp = netdev_priv(dev);
2117         size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2118         int err;
2119 
2120         netdev_dbg(bp->dev, "open\n");
2121 
2122         /* carrier starts down */
2123         netif_carrier_off(dev);
2124 
2125         /* if the phy is not yet registered, retry later */
2126         if (!dev->phydev)
2127                 return -EAGAIN;
2128 
2129         /* RX buffers initialization */
2130         macb_init_rx_buffer_size(bp, bufsz);
2131 
2132         err = macb_alloc_consistent(bp);
2133         if (err) {
2134                 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2135                            err);
2136                 return err;
2137         }
2138 
2139         napi_enable(&bp->napi);
2140 
2141         bp->macbgem_ops.mog_init_rings(bp);
2142         macb_init_hw(bp);
2143 
2144         /* schedule a link state check */
2145         phy_start(dev->phydev);
2146 
2147         netif_tx_start_all_queues(dev);
2148 
2149         return 0;
2150 }
2151 
2152 static int macb_close(struct net_device *dev)
2153 {
2154         struct macb *bp = netdev_priv(dev);
2155         unsigned long flags;
2156 
2157         netif_tx_stop_all_queues(dev);
2158         napi_disable(&bp->napi);
2159 
2160         if (dev->phydev)
2161                 phy_stop(dev->phydev);
2162 
2163         spin_lock_irqsave(&bp->lock, flags);
2164         macb_reset_hw(bp);
2165         netif_carrier_off(dev);
2166         spin_unlock_irqrestore(&bp->lock, flags);
2167 
2168         macb_free_consistent(bp);
2169 
2170         return 0;
2171 }
2172 
2173 static int macb_change_mtu(struct net_device *dev, int new_mtu)
2174 {
2175         if (netif_running(dev))
2176                 return -EBUSY;
2177 
2178         dev->mtu = new_mtu;
2179 
2180         return 0;
2181 }
2182 
2183 static void gem_update_stats(struct macb *bp)
2184 {
2185         unsigned int i;
2186         u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2187 
2188         for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2189                 u32 offset = gem_statistics[i].offset;
2190                 u64 val = bp->macb_reg_readl(bp, offset);
2191 
2192                 bp->ethtool_stats[i] += val;
2193                 *p += val;
2194 
2195                 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2196                         /* Add GEM_OCTTXH, GEM_OCTRXH */
2197                         val = bp->macb_reg_readl(bp, offset + 4);
2198                         bp->ethtool_stats[i] += ((u64)val) << 32;
2199                         *(++p) += val;
2200                 }
2201         }
2202 }
2203 
2204 static struct net_device_stats *gem_get_stats(struct macb *bp)
2205 {
2206         struct gem_stats *hwstat = &bp->hw_stats.gem;
2207         struct net_device_stats *nstat = &bp->stats;
2208 
2209         gem_update_stats(bp);
2210 
2211         nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2212                             hwstat->rx_alignment_errors +
2213                             hwstat->rx_resource_errors +
2214                             hwstat->rx_overruns +
2215                             hwstat->rx_oversize_frames +
2216                             hwstat->rx_jabbers +
2217                             hwstat->rx_undersized_frames +
2218                             hwstat->rx_length_field_frame_errors);
2219         nstat->tx_errors = (hwstat->tx_late_collisions +
2220                             hwstat->tx_excessive_collisions +
2221                             hwstat->tx_underrun +
2222                             hwstat->tx_carrier_sense_errors);
2223         nstat->multicast = hwstat->rx_multicast_frames;
2224         nstat->collisions = (hwstat->tx_single_collision_frames +
2225                              hwstat->tx_multiple_collision_frames +
2226                              hwstat->tx_excessive_collisions);
2227         nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2228                                    hwstat->rx_jabbers +
2229                                    hwstat->rx_undersized_frames +
2230                                    hwstat->rx_length_field_frame_errors);
2231         nstat->rx_over_errors = hwstat->rx_resource_errors;
2232         nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2233         nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2234         nstat->rx_fifo_errors = hwstat->rx_overruns;
2235         nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2236         nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2237         nstat->tx_fifo_errors = hwstat->tx_underrun;
2238 
2239         return nstat;
2240 }
2241 
2242 static void gem_get_ethtool_stats(struct net_device *dev,
2243                                   struct ethtool_stats *stats, u64 *data)
2244 {
2245         struct macb *bp;
2246 
2247         bp = netdev_priv(dev);
2248         gem_update_stats(bp);
2249         memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
2250 }
2251 
2252 static int gem_get_sset_count(struct net_device *dev, int sset)
2253 {
2254         switch (sset) {
2255         case ETH_SS_STATS:
2256                 return GEM_STATS_LEN;
2257         default:
2258                 return -EOPNOTSUPP;
2259         }
2260 }
2261 
2262 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2263 {
2264         unsigned int i;
2265 
2266         switch (sset) {
2267         case ETH_SS_STATS:
2268                 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2269                         memcpy(p, gem_statistics[i].stat_string,
2270                                ETH_GSTRING_LEN);
2271                 break;
2272         }
2273 }
2274 
2275 static struct net_device_stats *macb_get_stats(struct net_device *dev)
2276 {
2277         struct macb *bp = netdev_priv(dev);
2278         struct net_device_stats *nstat = &bp->stats;
2279         struct macb_stats *hwstat = &bp->hw_stats.macb;
2280 
2281         if (macb_is_gem(bp))
2282                 return gem_get_stats(bp);
2283 
2284         /* read stats from hardware */
2285         macb_update_stats(bp);
2286 
2287         /* Convert HW stats into netdevice stats */
2288         nstat->rx_errors = (hwstat->rx_fcs_errors +
2289                             hwstat->rx_align_errors +
2290                             hwstat->rx_resource_errors +
2291                             hwstat->rx_overruns +
2292                             hwstat->rx_oversize_pkts +
2293                             hwstat->rx_jabbers +
2294                             hwstat->rx_undersize_pkts +
2295                             hwstat->rx_length_mismatch);
2296         nstat->tx_errors = (hwstat->tx_late_cols +
2297                             hwstat->tx_excessive_cols +
2298                             hwstat->tx_underruns +
2299                             hwstat->tx_carrier_errors +
2300                             hwstat->sqe_test_errors);
2301         nstat->collisions = (hwstat->tx_single_cols +
2302                              hwstat->tx_multiple_cols +
2303                              hwstat->tx_excessive_cols);
2304         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2305                                    hwstat->rx_jabbers +
2306                                    hwstat->rx_undersize_pkts +
2307                                    hwstat->rx_length_mismatch);
2308         nstat->rx_over_errors = hwstat->rx_resource_errors +
2309                                    hwstat->rx_overruns;
2310         nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2311         nstat->rx_frame_errors = hwstat->rx_align_errors;
2312         nstat->rx_fifo_errors = hwstat->rx_overruns;
2313         /* XXX: What does "missed" mean? */
2314         nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2315         nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2316         nstat->tx_fifo_errors = hwstat->tx_underruns;
2317         /* Don't know about heartbeat or window errors... */
2318 
2319         return nstat;
2320 }
2321 
2322 static int macb_get_regs_len(struct net_device *netdev)
2323 {
2324         return MACB_GREGS_NBR * sizeof(u32);
2325 }
2326 
2327 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2328                           void *p)
2329 {
2330         struct macb *bp = netdev_priv(dev);
2331         unsigned int tail, head;
2332         u32 *regs_buff = p;
2333 
2334         regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2335                         | MACB_GREGS_VERSION;
2336 
2337         tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2338         head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
2339 
2340         regs_buff[0]  = macb_readl(bp, NCR);
2341         regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
2342         regs_buff[2]  = macb_readl(bp, NSR);
2343         regs_buff[3]  = macb_readl(bp, TSR);
2344         regs_buff[4]  = macb_readl(bp, RBQP);
2345         regs_buff[5]  = macb_readl(bp, TBQP);
2346         regs_buff[6]  = macb_readl(bp, RSR);
2347         regs_buff[7]  = macb_readl(bp, IMR);
2348 
2349         regs_buff[8]  = tail;
2350         regs_buff[9]  = head;
2351         regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2352         regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2353 
2354         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2355                 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2356         if (macb_is_gem(bp))
2357                 regs_buff[13] = gem_readl(bp, DMACFG);
2358 }
2359 
2360 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2361 {
2362         struct macb *bp = netdev_priv(netdev);
2363 
2364         wol->supported = 0;
2365         wol->wolopts = 0;
2366 
2367         if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2368                 wol->supported = WAKE_MAGIC;
2369 
2370                 if (bp->wol & MACB_WOL_ENABLED)
2371                         wol->wolopts |= WAKE_MAGIC;
2372         }
2373 }
2374 
2375 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2376 {
2377         struct macb *bp = netdev_priv(netdev);
2378 
2379         if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2380             (wol->wolopts & ~WAKE_MAGIC))
2381                 return -EOPNOTSUPP;
2382 
2383         if (wol->wolopts & WAKE_MAGIC)
2384                 bp->wol |= MACB_WOL_ENABLED;
2385         else
2386                 bp->wol &= ~MACB_WOL_ENABLED;
2387 
2388         device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2389 
2390         return 0;
2391 }
2392 
2393 static void macb_get_ringparam(struct net_device *netdev,
2394                                struct ethtool_ringparam *ring)
2395 {
2396         struct macb *bp = netdev_priv(netdev);
2397 
2398         ring->rx_max_pending = MAX_RX_RING_SIZE;
2399         ring->tx_max_pending = MAX_TX_RING_SIZE;
2400 
2401         ring->rx_pending = bp->rx_ring_size;
2402         ring->tx_pending = bp->tx_ring_size;
2403 }
2404 
2405 static int macb_set_ringparam(struct net_device *netdev,
2406                               struct ethtool_ringparam *ring)
2407 {
2408         struct macb *bp = netdev_priv(netdev);
2409         u32 new_rx_size, new_tx_size;
2410         unsigned int reset = 0;
2411 
2412         if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2413                 return -EINVAL;
2414 
2415         new_rx_size = clamp_t(u32, ring->rx_pending,
2416                               MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2417         new_rx_size = roundup_pow_of_two(new_rx_size);
2418 
2419         new_tx_size = clamp_t(u32, ring->tx_pending,
2420                               MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2421         new_tx_size = roundup_pow_of_two(new_tx_size);
2422 
2423         if ((new_tx_size == bp->tx_ring_size) &&
2424             (new_rx_size == bp->rx_ring_size)) {
2425                 /* nothing to do */
2426                 return 0;
2427         }
2428 
2429         if (netif_running(bp->dev)) {
2430                 reset = 1;
2431                 macb_close(bp->dev);
2432         }
2433 
2434         bp->rx_ring_size = new_rx_size;
2435         bp->tx_ring_size = new_tx_size;
2436 
2437         if (reset)
2438                 macb_open(bp->dev);
2439 
2440         return 0;
2441 }
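
     /* Example (illustration only): "ethtool -G <dev> rx 100 tx 5000" clamps
      * the requests to [64, 8192] and [64, 4096] and rounds them up to powers
      * of two, yielding 128 RX and 4096 TX descriptors.
      */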
2442 
2443 static const struct ethtool_ops macb_ethtool_ops = {
2444         .get_regs_len           = macb_get_regs_len,
2445         .get_regs               = macb_get_regs,
2446         .get_link               = ethtool_op_get_link,
2447         .get_ts_info            = ethtool_op_get_ts_info,
2448         .get_wol                = macb_get_wol,
2449         .set_wol                = macb_set_wol,
2450         .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2451         .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2452         .get_ringparam          = macb_get_ringparam,
2453         .set_ringparam          = macb_set_ringparam,
2454 };
2455 
2456 static const struct ethtool_ops gem_ethtool_ops = {
2457         .get_regs_len           = macb_get_regs_len,
2458         .get_regs               = macb_get_regs,
2459         .get_link               = ethtool_op_get_link,
2460         .get_ts_info            = ethtool_op_get_ts_info,
2461         .get_ethtool_stats      = gem_get_ethtool_stats,
2462         .get_strings            = gem_get_ethtool_strings,
2463         .get_sset_count         = gem_get_sset_count,
2464         .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2465         .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2466         .get_ringparam          = macb_get_ringparam,
2467         .set_ringparam          = macb_set_ringparam,
2468 };
2469 
2470 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2471 {
2472         struct phy_device *phydev = dev->phydev;
2473 
2474         if (!netif_running(dev))
2475                 return -EINVAL;
2476 
2477         if (!phydev)
2478                 return -ENODEV;
2479 
2480         return phy_mii_ioctl(phydev, rq, cmd);
2481 }
2482 
2483 static int macb_set_features(struct net_device *netdev,
2484                              netdev_features_t features)
2485 {
2486         struct macb *bp = netdev_priv(netdev);
2487         netdev_features_t changed = features ^ netdev->features;
2488 
2489         /* TX checksum offload */
2490         if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2491                 u32 dmacfg;
2492 
2493                 dmacfg = gem_readl(bp, DMACFG);
2494                 if (features & NETIF_F_HW_CSUM)
2495                         dmacfg |= GEM_BIT(TXCOEN);
2496                 else
2497                         dmacfg &= ~GEM_BIT(TXCOEN);
2498                 gem_writel(bp, DMACFG, dmacfg);
2499         }
2500 
2501         /* RX checksum offload */
2502         if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2503                 u32 netcfg;
2504 
2505                 netcfg = gem_readl(bp, NCFGR);
2506                 if (features & NETIF_F_RXCSUM &&
2507                     !(netdev->flags & IFF_PROMISC))
2508                         netcfg |= GEM_BIT(RXCOEN);
2509                 else
2510                         netcfg &= ~GEM_BIT(RXCOEN);
2511                 gem_writel(bp, NCFGR, netcfg);
2512         }
2513 
2514         return 0;
2515 }
2516 
2517 static const struct net_device_ops macb_netdev_ops = {
2518         .ndo_open               = macb_open,
2519         .ndo_stop               = macb_close,
2520         .ndo_start_xmit         = macb_start_xmit,
2521         .ndo_set_rx_mode        = macb_set_rx_mode,
2522         .ndo_get_stats          = macb_get_stats,
2523         .ndo_do_ioctl           = macb_ioctl,
2524         .ndo_validate_addr      = eth_validate_addr,
2525         .ndo_change_mtu         = macb_change_mtu,
2526         .ndo_set_mac_address    = eth_mac_addr,
2527 #ifdef CONFIG_NET_POLL_CONTROLLER
2528         .ndo_poll_controller    = macb_poll_controller,
2529 #endif
2530         .ndo_set_features       = macb_set_features,
2531         .ndo_features_check     = macb_features_check,
2532 };
2533 
2534 /* Configure peripheral capabilities according to device tree
2535  * and integration options used
2536  */
2537 static void macb_configure_caps(struct macb *bp,
2538                                 const struct macb_config *dt_conf)
2539 {
2540         u32 dcfg;
2541 
2542         if (dt_conf)
2543                 bp->caps = dt_conf->caps;
2544 
2545         if (hw_is_gem(bp->regs, bp->native_io)) {
2546                 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2547 
2548                 dcfg = gem_readl(bp, DCFG1);
2549                 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2550                         bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2551                 dcfg = gem_readl(bp, DCFG2);
2552                 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2553                         bp->caps |= MACB_CAPS_FIFO_MODE;
2554         }
2555 
2556         dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2557 }
2558 
2559 static void macb_probe_queues(void __iomem *mem,
2560                               bool native_io,
2561                               unsigned int *queue_mask,
2562                               unsigned int *num_queues)
2563 {
2564         unsigned int hw_q;
2565 
2566         *queue_mask = 0x1;
2567         *num_queues = 1;
2568 
2569         /* Is it MACB or GEM?
2570          *
2571          * We need to read directly from the hardware here because
2572          * we are early in the probe process and the
2573          * MACB_CAPS_MACB_IS_GEM capability flag is not set yet.
2574          */
2575         if (!hw_is_gem(mem, native_io))
2576                 return;
2577 
2578         /* bit 0 is never set but queue 0 always exists */
2579         *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
2580 
2581         *queue_mask |= 0x1;
2582 
2583         for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2584                 if (*queue_mask & (1 << hw_q))
2585                         (*num_queues)++;
2586 }
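
     /* Example (illustration only): a GEM whose DCFG6 low byte reads 0x06
      * (queues 1 and 2 present) yields *queue_mask = 0x07 after ORing in
      * queue 0, so *num_queues becomes 3.
      */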
2587 
2588 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
2589                          struct clk **hclk, struct clk **tx_clk,
2590                          struct clk **rx_clk)
2591 {
2592         struct macb_platform_data *pdata;
2593         int err;
2594 
2595         pdata = dev_get_platdata(&pdev->dev);
2596         if (pdata) {
2597                 *pclk = pdata->pclk;
2598                 *hclk = pdata->hclk;
2599         } else {
2600                 *pclk = devm_clk_get(&pdev->dev, "pclk");
2601                 *hclk = devm_clk_get(&pdev->dev, "hclk");
2602         }
2603 
2604         if (IS_ERR(*pclk)) {
2605                 err = PTR_ERR(*pclk);
2606                 dev_err(&pdev->dev, "failed to get pclk (%d)\n", err);
2607                 return err;
2608         }
2609 
2610         if (IS_ERR(*hclk)) {
2611                 err = PTR_ERR(*hclk);
2612                 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
2613                 return err;
2614         }
2615 
2616         *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2617         if (IS_ERR(*tx_clk))
2618                 *tx_clk = NULL;
2619 
2620         *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
2621         if (IS_ERR(*rx_clk))
2622                 *rx_clk = NULL;
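             /* tx_clk and rx_clk are optional: clk_prepare_enable() treats a
              * NULL clock as a no-op and returns 0
              */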
2623 
2624         err = clk_prepare_enable(*pclk);
2625         if (err) {
2626                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2627                 return err;
2628         }
2629 
2630         err = clk_prepare_enable(*hclk);
2631         if (err) {
2632                 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
2633                 goto err_disable_pclk;
2634         }
2635 
2636         err = clk_prepare_enable(*tx_clk);
2637         if (err) {
2638                 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2639                 goto err_disable_hclk;
2640         }
2641 
2642         err = clk_prepare_enable(*rx_clk);
2643         if (err) {
2644                 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2645                 goto err_disable_txclk;
2646         }
2647 
2648         return 0;
2649 
2650 err_disable_txclk:
2651         clk_disable_unprepare(*tx_clk);
2652 
2653 err_disable_hclk:
2654         clk_disable_unprepare(*hclk);
2655 
2656 err_disable_pclk:
2657         clk_disable_unprepare(*pclk);
2658 
2659         return err;
2660 }
2661 
2662 static int macb_init(struct platform_device *pdev)
2663 {
2664         struct net_device *dev = platform_get_drvdata(pdev);
2665         unsigned int hw_q, q;
2666         struct macb *bp = netdev_priv(dev);
2667         struct macb_queue *queue;
2668         int err;
2669         u32 val;
2670 
2671         bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
2672         bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
2673 
2674         /* Set the queue register mapping once and for all: queue0 has a
2675          * special register mapping, but we don't want to test the queue
2676          * index and then compute the corresponding register offset at
2677          * run time.
2678          */
2678         for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
2679                 if (!(bp->queue_mask & (1 << hw_q)))
2680                         continue;
2681 
2682                 queue = &bp->queues[q];
2683                 queue->bp = bp;
2684                 if (hw_q) {
2685                         queue->ISR  = GEM_ISR(hw_q - 1);
2686                         queue->IER  = GEM_IER(hw_q - 1);
2687                         queue->IDR  = GEM_IDR(hw_q - 1);
2688                         queue->IMR  = GEM_IMR(hw_q - 1);
2689                         queue->TBQP = GEM_TBQP(hw_q - 1);
2690 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2691                         if (bp->hw_dma_cap == HW_DMA_CAP_64B)
2692                                 queue->TBQPH = GEM_TBQPH(hw_q - 1);
2693 #endif
2694                 } else {
2695                         /* queue0 uses legacy registers */
2696                         queue->ISR  = MACB_ISR;
2697                         queue->IER  = MACB_IER;
2698                         queue->IDR  = MACB_IDR;
2699                         queue->IMR  = MACB_IMR;
2700                         queue->TBQP = MACB_TBQP;
2701 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2702                         if (bp->hw_dma_cap == HW_DMA_CAP_64B)
2703                                 queue->TBQPH = MACB_TBQPH;
2704 #endif
2705                 }
2706 
2707                 /* Get the IRQ: here we use the Linux queue index, not the
2708                  * hardware queue index. The queue IRQ definitions in the
2709                  * device tree must not include the optional gaps that may
2710                  * exist in the hardware queue mask.
2711                  */
2712                 queue->irq = platform_get_irq(pdev, q);
2713                 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
2714                                        IRQF_SHARED, dev->name, queue);
2715                 if (err) {
2716                         dev_err(&pdev->dev,
2717                                 "Unable to request IRQ %d (error %d)\n",
2718                                 queue->irq, err);
2719                         return err;
2720                 }
2721 
2722                 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
2723                 q++;
2724         }
2725 
2726         dev->netdev_ops = &macb_netdev_ops;
2727         netif_napi_add(dev, &bp->napi, macb_poll, 64);
2728 
2729         /* set up the appropriate routines according to the adapter type */
2730         if (macb_is_gem(bp)) {
2731                 bp->max_tx_length = GEM_MAX_TX_LEN;
2732                 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2733                 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2734                 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2735                 bp->macbgem_ops.mog_rx = gem_rx;
2736                 dev->ethtool_ops = &gem_ethtool_ops;
2737         } else {
2738                 bp->max_tx_length = MACB_MAX_TX_LEN;
2739                 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2740                 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2741                 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2742                 bp->macbgem_ops.mog_rx = macb_rx;
2743                 dev->ethtool_ops = &macb_ethtool_ops;
2744         }
2745 
2746         /* Set features */
2747         dev->hw_features = NETIF_F_SG;
2748 
2749         /* Check LSO capability */
2750         if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
2751                 dev->hw_features |= MACB_NETIF_LSO;
2752 
2753         /* Checksum offload is only available on gem with packet buffer */
2754         if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
2755                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2756         if (bp->caps & MACB_CAPS_SG_DISABLED)
2757                 dev->hw_features &= ~NETIF_F_SG;
2758         dev->features = dev->hw_features;
2759 
2760         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
2761                 val = 0;
2762                 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2763                         val = GEM_BIT(RGMII);
2764                 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
2765                          (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
2766                         val = MACB_BIT(RMII);
2767                 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
2768                         val = MACB_BIT(MII);
2769 
2770                 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
2771                         val |= MACB_BIT(CLKEN);
2772 
2773                 macb_or_gem_writel(bp, USRIO, val);
2774         }
2775 
2776         /* Set MII management clock divider */
2777         val = macb_mdc_clk_div(bp);
2778         val |= macb_dbw(bp);
2779         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2780                 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2781         macb_writel(bp, NCFGR, val);
2782 
2783         return 0;
2784 }
2785 
2786 #if defined(CONFIG_OF)
2787 /* 1518 rounded up */
2788 #define AT91ETHER_MAX_RBUFF_SZ  0x600
2789 /* max number of receive buffers */
2790 #define AT91ETHER_MAX_RX_DESCR  9
2791 
2792 /* Initialize and start the Receive and Transmit subsystems */
2793 static int at91ether_start(struct net_device *dev)
2794 {
2795         struct macb *lp = netdev_priv(dev);
2796         struct macb_dma_desc *desc;
2797         dma_addr_t addr;
2798         u32 ctl;
2799         int i;
2800 
2801         lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2802                                          (AT91ETHER_MAX_RX_DESCR *
2803                                           macb_dma_desc_get_size(lp)),
2804                                          &lp->rx_ring_dma, GFP_KERNEL);
2805         if (!lp->rx_ring)
2806                 return -ENOMEM;
2807 
2808         lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
2809                                             AT91ETHER_MAX_RX_DESCR *
2810                                             AT91ETHER_MAX_RBUFF_SZ,
2811                                             &lp->rx_buffers_dma, GFP_KERNEL);
2812         if (!lp->rx_buffers) {
2813                 dma_free_coherent(&lp->pdev->dev,
2814                                   AT91ETHER_MAX_RX_DESCR *
2815                                   macb_dma_desc_get_size(lp),
2816                                   lp->rx_ring, lp->rx_ring_dma);
2817                 lp->rx_ring = NULL;
2818                 return -ENOMEM;
2819         }
2820 
2821         addr = lp->rx_buffers_dma;
2822         for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2823                 desc = macb_rx_desc(lp, i);
2824                 macb_set_addr(lp, desc, addr);
2825                 desc->ctrl = 0;
2826                 addr += AT91ETHER_MAX_RBUFF_SZ;
2827         }
2828 
2829         /* Set the Wrap bit on the last descriptor */
2830         desc->addr |= MACB_BIT(RX_WRAP);
2831 
2832         /* Reset buffer index */
2833         lp->rx_tail = 0;
2834 
2835         /* Program address of descriptor list in Rx Buffer Queue register */
2836         macb_writel(lp, RBQP, lp->rx_ring_dma);
2837 
2838         /* Enable Receive and Transmit */
2839         ctl = macb_readl(lp, NCR);
2840         macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
2841 
2842         return 0;
2843 }
2844 
2845 /* Open the ethernet interface */
2846 static int at91ether_open(struct net_device *dev)
2847 {
2848         struct macb *lp = netdev_priv(dev);
2849         u32 ctl;
2850         int ret;
2851 
2852         /* Clear internal statistics */
2853         ctl = macb_readl(lp, NCR);
2854         macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2855 
2856         macb_set_hwaddr(lp);
2857 
2858         ret = at91ether_start(dev);
2859         if (ret)
2860                 return ret;
2861 
2862         /* Enable MAC interrupts */
2863         macb_writel(lp, IER, MACB_BIT(RCOMP)    |
2864                              MACB_BIT(RXUBR)    |
2865                              MACB_BIT(ISR_TUND) |
2866                              MACB_BIT(ISR_RLE)  |
2867                              MACB_BIT(TCOMP)    |
2868                              MACB_BIT(ISR_ROVR) |
2869                              MACB_BIT(HRESP));
2870 
2871         /* schedule a link state check */
2872         phy_start(dev->phydev);
2873 
2874         netif_start_queue(dev);
2875 
2876         return 0;
2877 }
2878 
2879 /* Close the interface */
2880 static int at91ether_close(struct net_device *dev)
2881 {
2882         struct macb *lp = netdev_priv(dev);
2883         u32 ctl;
2884 
2885         /* Disable Receiver and Transmitter */
2886         ctl = macb_readl(lp, NCR);
2887         macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
2888 
2889         /* Disable MAC interrupts */
2890         macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
2891                              MACB_BIT(RXUBR)    |
2892                              MACB_BIT(ISR_TUND) |
2893                              MACB_BIT(ISR_RLE)  |
2894                              MACB_BIT(TCOMP)    |
2895                              MACB_BIT(ISR_ROVR) |
2896                              MACB_BIT(HRESP));
2897 
2898         netif_stop_queue(dev);
2899 
2900         dma_free_coherent(&lp->pdev->dev,
2901                           AT91ETHER_MAX_RX_DESCR *
2902                           macb_dma_desc_get_size(lp),
2903                           lp->rx_ring, lp->rx_ring_dma);
2904         lp->rx_ring = NULL;
2905 
2906         dma_free_coherent(&lp->pdev->dev,
2907                           AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
2908                           lp->rx_buffers, lp->rx_buffers_dma);
2909         lp->rx_buffers = NULL;
2910 
2911         return 0;
2912 }
2913 
2914 /* Transmit packet */
2915 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2916 {
2917         struct macb *lp = netdev_priv(dev);
2918 
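             /* This EMAC handles one frame at a time: RM9200_BNQ ("buffer
              * not queued") in TSR means the transmit path is free, and the
              * single in-flight skb is tracked in lp->skb until TCOMP fires.
              */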
2919         if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
2920                 netif_stop_queue(dev);
2921 
2922                 /* Store packet information (to free when Tx completed) */
2923                 lp->skb = skb;
2924                 lp->skb_length = skb->len;
2925                 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
2926                                                   skb->len, DMA_TO_DEVICE);
2927                 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
2928                         dev_kfree_skb_any(skb);
2929                         dev->stats.tx_dropped++;
2930                         netdev_err(dev, "%s: DMA mapping error\n", __func__);
2931                         return NETDEV_TX_OK;
2932                 }
2933 
2934                 /* Set address of the data in the Transmit Address register */
2935                 macb_writel(lp, TAR, lp->skb_physaddr);
2936                 /* Set length of the packet in the Transmit Control register */
2937                 macb_writel(lp, TCR, skb->len);
2938 
2939         } else {
2940                 netdev_err(dev, "%s called, but device is busy!\n", __func__);
2941                 return NETDEV_TX_BUSY;
2942         }
2943 
2944         return NETDEV_TX_OK;
2945 }
2946 
2947 /* Extract received frames from the buffer descriptors and send them to
2948  * the upper layers. (Called from interrupt context.)
2949  */
2950 static void at91ether_rx(struct net_device *dev)
2951 {
2952         struct macb *lp = netdev_priv(dev);
2953         struct macb_dma_desc *desc;
2954         unsigned char *p_recv;
2955         struct sk_buff *skb;
2956         unsigned int pktlen;
2957 
2958         desc = macb_rx_desc(lp, lp->rx_tail);
2959         while (desc->addr & MACB_BIT(RX_USED)) {
2960                 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2961                 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
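                     /* Over-allocate by 2 bytes and reserve them so the IP
                      * header lands on a 4-byte boundary after the 14-byte
                      * Ethernet header.
                      */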
2962                 skb = netdev_alloc_skb(dev, pktlen + 2);
2963                 if (skb) {
2964                         skb_reserve(skb, 2);
2965                         memcpy(skb_put(skb, pktlen), p_recv, pktlen);
2966 
2967                         skb->protocol = eth_type_trans(skb, dev);
2968                         lp->stats.rx_packets++;
2969                         lp->stats.rx_bytes += pktlen;
2970                         netif_rx(skb);
2971                 } else {
2972                         lp->stats.rx_dropped++;
2973                 }
2974 
2975                 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
2976                         lp->stats.multicast++;
2977 
2978                 /* reset ownership bit */
2979                 desc->addr &= ~MACB_BIT(RX_USED);
2980 
2981                 /* wrap after last buffer */
2982                 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2983                         lp->rx_tail = 0;
2984                 else
2985                         lp->rx_tail++;
2986 
2987                 desc = macb_rx_desc(lp, lp->rx_tail);
2988         }
2989 }
2990 
2991 /* MAC interrupt handler */
2992 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
2993 {
2994         struct net_device *dev = dev_id;
2995         struct macb *lp = netdev_priv(dev);
2996         u32 intstatus, ctl;
2997 
2998         /* MAC Interrupt Status register indicates what interrupts are pending.
2999          * It is automatically cleared once read.
3000          */
3001         intstatus = macb_readl(lp, ISR);
3002 
3003         /* Receive complete */
3004         if (intstatus & MACB_BIT(RCOMP))
3005                 at91ether_rx(dev);
3006 
3007         /* Transmit complete */
3008         if (intstatus & MACB_BIT(TCOMP)) {
3009                 /* The TCOMP bit is set even if the transmission failed */
3010                 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
3011                         lp->stats.tx_errors++;
3012 
3013                 if (lp->skb) {
3014                         dev_kfree_skb_irq(lp->skb);
3015                         lp->skb = NULL;
3016                         dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
3017                                          lp->skb_length, DMA_TO_DEVICE);
3018                         lp->stats.tx_packets++;
3019                         lp->stats.tx_bytes += lp->skb_length;
3020                 }
3021                 netif_wake_queue(dev);
3022         }
3023 
3024         /* Work-around for EMAC Errata section 41.3.1 */
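             /* Restarting the receiver (RE off, then back on) recovers it
              * from the "used bit read" condition; the wmb() orders the two
              * register writes.
              */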
3025         if (intstatus & MACB_BIT(RXUBR)) {
3026                 ctl = macb_readl(lp, NCR);
3027                 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
3028                 wmb();
3029                 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
3030         }
3031 
3032         if (intstatus & MACB_BIT(ISR_ROVR))
3033                 netdev_err(dev, "ROVR error\n");
3034 
3035         return IRQ_HANDLED;
3036 }
3037 
3038 #ifdef CONFIG_NET_POLL_CONTROLLER
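     /* Netpoll hook: run the interrupt handler by hand with local interrupts
      * masked so it cannot race the real IRQ.
      */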
3039 static void at91ether_poll_controller(struct net_device *dev)
3040 {
3041         unsigned long flags;
3042 
3043         local_irq_save(flags);
3044         at91ether_interrupt(dev->irq, dev);
3045         local_irq_restore(flags);
3046 }
3047 #endif
3048 
3049 static const struct net_device_ops at91ether_netdev_ops = {
3050         .ndo_open               = at91ether_open,
3051         .ndo_stop               = at91ether_close,
3052         .ndo_start_xmit         = at91ether_start_xmit,
3053         .ndo_get_stats          = macb_get_stats,
3054         .ndo_set_rx_mode        = macb_set_rx_mode,
3055         .ndo_set_mac_address    = eth_mac_addr,
3056         .ndo_do_ioctl           = macb_ioctl,
3057         .ndo_validate_addr      = eth_validate_addr,
3058 #ifdef CONFIG_NET_POLL_CONTROLLER
3059         .ndo_poll_controller    = at91ether_poll_controller,
3060 #endif
3061 };
3062 
3063 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
3064                               struct clk **hclk, struct clk **tx_clk,
3065                               struct clk **rx_clk)
3066 {
3067         int err;
3068 
3069         *hclk = NULL;
3070         *tx_clk = NULL;
3071         *rx_clk = NULL;
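             /* Only the peripheral clock exists on this IP; the NULL handles
              * stay harmless later because the clk API treats NULL as a dummy
              * clock (clk_disable_unprepare(NULL) is a no-op).
              */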
3072 
3073         *pclk = devm_clk_get(&pdev->dev, "ether_clk");
3074         if (IS_ERR(*pclk))
3075                 return PTR_ERR(*pclk);
3076 
3077         err = clk_prepare_enable(*pclk);
3078         if (err) {
3079                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3080                 return err;
3081         }
3082 
3083         return 0;
3084 }
3085 
3086 static int at91ether_init(struct platform_device *pdev)
3087 {
3088         struct net_device *dev = platform_get_drvdata(pdev);
3089         struct macb *bp = netdev_priv(dev);
3090         int err;
3091         u32 reg;
3092 
3093         dev->netdev_ops = &at91ether_netdev_ops;
3094         dev->ethtool_ops = &macb_ethtool_ops;
3095 
3096         err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
3097                                0, dev->name, dev);
3098         if (err)
3099                 return err;
3100 
3101         macb_writel(bp, NCR, 0);
3102 
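             /* MDC at MCK/32, and BIG so 1536-byte frames are accepted;
              * RMII vs MII is the only board-dependent choice here.
              */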
3103         reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3104         if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3105                 reg |= MACB_BIT(RM9200_RMII);
3106 
3107         macb_writel(bp, NCFGR, reg);
3108 
3109         return 0;
3110 }
3111 
3112 static const struct macb_config at91sam9260_config = {
3113         .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3114         .clk_init = macb_clk_init,
3115         .init = macb_init,
3116 };
3117 
3118 static const struct macb_config pc302gem_config = {
3119         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3120         .dma_burst_length = 16,
3121         .clk_init = macb_clk_init,
3122         .init = macb_init,
3123 };
3124 
3125 static const struct macb_config sama5d2_config = {
3126         .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3127         .dma_burst_length = 16,
3128         .clk_init = macb_clk_init,
3129         .init = macb_init,
3130 };
3131 
3132 static const struct macb_config sama5d3_config = {
3133         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
3134               | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3135         .dma_burst_length = 16,
3136         .clk_init = macb_clk_init,
3137         .init = macb_init,
3138 };
3139 
3140 static const struct macb_config sama5d4_config = {
3141         .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3142         .dma_burst_length = 4,
3143         .clk_init = macb_clk_init,
3144         .init = macb_init,
3145 };
3146 
3147 static const struct macb_config emac_config = {
3148         .clk_init = at91ether_clk_init,
3149         .init = at91ether_init,
3150 };
3151 
3152 static const struct macb_config np4_config = {
3153         .caps = MACB_CAPS_USRIO_DISABLED,
3154         .clk_init = macb_clk_init,
3155         .init = macb_init,
3156 };
3157 
3158 static const struct macb_config zynqmp_config = {
3159         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
3160         .dma_burst_length = 16,
3161         .clk_init = macb_clk_init,
3162         .init = macb_init,
3163         .jumbo_max_len = 10240,
3164 };
3165 
3166 static const struct macb_config zynq_config = {
3167         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
3168         .dma_burst_length = 16,
3169         .clk_init = macb_clk_init,
3170         .init = macb_init,
3171 };
3172 
3173 static const struct of_device_id macb_dt_ids[] = {
3174         { .compatible = "cdns,at32ap7000-macb" },
3175         { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
3176         { .compatible = "cdns,macb" },
3177         { .compatible = "cdns,np4-macb", .data = &np4_config },
3178         { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
3179         { .compatible = "cdns,gem", .data = &pc302gem_config },
3180         { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
3181         { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
3182         { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3183         { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3184         { .compatible = "cdns,emac", .data = &emac_config },
3185         { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
3186         { .compatible = "cdns,zynq-gem", .data = &zynq_config },
3187         { /* sentinel */ }
3188 };
3189 MODULE_DEVICE_TABLE(of, macb_dt_ids);
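     /* A minimal, illustrative device-tree node that the "cdns,gem" entry
      * above would match (the register, interrupt and clock values here are
      * hypothetical and board-specific):
      *
      *      ethernet@e000b000 {
      *              compatible = "cdns,gem";
      *              reg = <0xe000b000 0x1000>;
      *              interrupts = <0 22 4>;
      *              clocks = <&clkc 30>, <&clkc 30>, <&clkc 13>;
      *              clock-names = "pclk", "hclk", "tx_clk";
      *              phy-mode = "rgmii-id";
      *      };
      */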
3190 #endif /* CONFIG_OF */
3191 
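     /* Fallback used by macb_probe() below when there is no OF match data,
      * e.g. a bare "cdns,macb" compatible or a non-DT platform device.
      */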
3192 static const struct macb_config default_gem_config = {
3193         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
3194         .dma_burst_length = 16,
3195         .clk_init = macb_clk_init,
3196         .init = macb_init,
3197         .jumbo_max_len = 10240,
3198 };
3199 
3200 static int macb_probe(struct platform_device *pdev)
3201 {
3202         const struct macb_config *macb_config = &default_gem_config;
3203         int (*clk_init)(struct platform_device *, struct clk **,
3204                         struct clk **, struct clk **,  struct clk **)
3205                                               = macb_config->clk_init;
3206         int (*init)(struct platform_device *) = macb_config->init;
3207         struct device_node *np = pdev->dev.of_node;
3208         struct device_node *phy_node;
3209         struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
3210         unsigned int queue_mask, num_queues;
3211         struct macb_platform_data *pdata;
3212         bool native_io;
3213         struct phy_device *phydev;
3214         struct net_device *dev;
3215         struct resource *regs;
3216         void __iomem *mem;
3217         const char *mac;
3218         struct macb *bp;
3219         int err;
3220 
3221         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3222         mem = devm_ioremap_resource(&pdev->dev, regs);
3223         if (IS_ERR(mem))
3224                 return PTR_ERR(mem);
3225 
3226         if (np) {
3227                 const struct of_device_id *match;
3228 
3229                 match = of_match_node(macb_dt_ids, np);
3230                 if (match && match->data) {
3231                         macb_config = match->data;
3232                         clk_init = macb_config->clk_init;
3233                         init = macb_config->init;
3234                 }
3235         }
3236 
3237         err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
3238         if (err)
3239                 return err;
3240 
3241         native_io = hw_is_native_io(mem);
3242 
3243         macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
3244         dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
3245         if (!dev) {
3246                 err = -ENOMEM;
3247                 goto err_disable_clocks;
3248         }
3249 
3250         dev->base_addr = regs->start;
3251 
3252         SET_NETDEV_DEV(dev, &pdev->dev);
3253 
3254         bp = netdev_priv(dev);
3255         bp->pdev = pdev;
3256         bp->dev = dev;
3257         bp->regs = mem;
3258         bp->native_io = native_io;
3259         if (native_io) {
3260                 bp->macb_reg_readl = hw_readl_native;
3261                 bp->macb_reg_writel = hw_writel_native;
3262         } else {
3263                 bp->macb_reg_readl = hw_readl;
3264                 bp->macb_reg_writel = hw_writel;
3265         }
3266         bp->num_queues = num_queues;
3267         bp->queue_mask = queue_mask;
3268         if (macb_config)
3269                 bp->dma_burst_length = macb_config->dma_burst_length;
3270         bp->pclk = pclk;
3271         bp->hclk = hclk;
3272         bp->tx_clk = tx_clk;
3273         bp->rx_clk = rx_clk;
3274         if (macb_config)
3275                 bp->jumbo_max_len = macb_config->jumbo_max_len;
3276 
3277         bp->wol = 0;
3278         if (of_get_property(np, "magic-packet", NULL))
3279                 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
3280         device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3281 
3282 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3283         if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3284                 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3285                 bp->hw_dma_cap = HW_DMA_CAP_64B;
3286         } else
3287                 bp->hw_dma_cap = HW_DMA_CAP_32B;
3288 #endif
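             /* DAW64 in DCFG6 advertises 64-bit descriptor support, but the
              * usable address range is 44 bits, hence DMA_BIT_MASK(44) above.
              */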
3289 
3290         spin_lock_init(&bp->lock);
3291 
3292         /* setup capabilities */
3293         macb_configure_caps(bp, macb_config);
3294 
3295         platform_set_drvdata(pdev, dev);
3296 
3297         dev->irq = platform_get_irq(pdev, 0);
3298         if (dev->irq < 0) {
3299                 err = dev->irq;
3300                 goto err_out_free_netdev;
3301         }
3302 
3303         /* MTU range: 68 - 1500 or 10240 */
3304         dev->min_mtu = GEM_MTU_MIN_SIZE;
3305         if (bp->caps & MACB_CAPS_JUMBO)
3306                 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
3307         else
3308                 dev->max_mtu = ETH_DATA_LEN;
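             /* JML holds the maximum jumbo frame length on the wire, so the
              * Ethernet header and FCS are subtracted to express it as an MTU.
              */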
3309 
3310         mac = of_get_mac_address(np);
3311         if (mac)
3312                 ether_addr_copy(bp->dev->dev_addr, mac);
3313         else
3314                 macb_get_hwaddr(bp);
3315 
3316         /* Power up the PHY if there is a GPIO reset */
3317         phy_node = of_get_next_available_child(np, NULL);
3318         if (phy_node) {
3319                 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
3320 
3321                 if (gpio_is_valid(gpio)) {
3322                         bp->reset_gpio = gpio_to_desc(gpio);
3323                         gpiod_direction_output(bp->reset_gpio, 1);
3324                 }
3325         }
3326         of_node_put(phy_node);
3327 
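             /* A negative return from of_get_phy_mode() means no usable
              * "phy-mode"/"phy-connection-type" property, so fall back to
              * legacy platform data, defaulting to MII.
              */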
3328         err = of_get_phy_mode(np);
3329         if (err < 0) {
3330                 pdata = dev_get_platdata(&pdev->dev);
3331                 if (pdata && pdata->is_rmii)
3332                         bp->phy_interface = PHY_INTERFACE_MODE_RMII;
3333                 else
3334                         bp->phy_interface = PHY_INTERFACE_MODE_MII;
3335         } else {
3336                 bp->phy_interface = err;
3337         }
3338 
3339         /* IP specific init */
3340         err = init(pdev);
3341         if (err)
3342                 goto err_out_free_netdev;
3343 
3344         err = macb_mii_init(bp);
3345         if (err)
3346                 goto err_out_free_netdev;
3347 
3348         phydev = dev->phydev;
3349 
3350         netif_carrier_off(dev);
3351 
3352         err = register_netdev(dev);
3353         if (err) {
3354                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
3355                 goto err_out_unregister_mdio;
3356         }
3357 
3358         phy_attached_info(phydev);
3359 
3360         netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
3361                     macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
3362                     dev->base_addr, dev->irq, dev->dev_addr);
3363 
3364         return 0;
3365 
3366 err_out_unregister_mdio:
3367         phy_disconnect(dev->phydev);
3368         mdiobus_unregister(bp->mii_bus);
3369         mdiobus_free(bp->mii_bus);
3370 
3371         /* Shutdown the PHY if there is a GPIO reset */
3372         if (bp->reset_gpio)
3373                 gpiod_set_value(bp->reset_gpio, 0);
3374 
3375 err_out_free_netdev:
3376         free_netdev(dev);
3377 
3378 err_disable_clocks:
3379         clk_disable_unprepare(tx_clk);
3380         clk_disable_unprepare(hclk);
3381         clk_disable_unprepare(pclk);
3382         clk_disable_unprepare(rx_clk);
3383 
3384         return err;
3385 }
3386 
3387 static int macb_remove(struct platform_device *pdev)
3388 {
3389         struct net_device *dev;
3390         struct macb *bp;
3391 
3392         dev = platform_get_drvdata(pdev);
3393 
3394         if (dev) {
3395                 bp = netdev_priv(dev);
3396                 if (dev->phydev)
3397                         phy_disconnect(dev->phydev);
3398                 mdiobus_unregister(bp->mii_bus);
3399                 dev->phydev = NULL;
3400                 mdiobus_free(bp->mii_bus);
3401 
3402                 /* Shutdown the PHY if there is a GPIO reset */
3403                 if (bp->reset_gpio)
3404                         gpiod_set_value(bp->reset_gpio, 0);
3405 
3406                 unregister_netdev(dev);
3407                 clk_disable_unprepare(bp->tx_clk);
3408                 clk_disable_unprepare(bp->hclk);
3409                 clk_disable_unprepare(bp->pclk);
3410                 clk_disable_unprepare(bp->rx_clk);
3411                 free_netdev(dev);
3412         }
3413 
3414         return 0;
3415 }
3416 
3417 static int __maybe_unused macb_suspend(struct device *dev)
3418 {
3419         struct platform_device *pdev = to_platform_device(dev);
3420         struct net_device *netdev = platform_get_drvdata(pdev);
3421         struct macb *bp = netdev_priv(netdev);
3422 
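             /* With Wake-on-LAN armed the MAC must keep receiving, so only
              * the magic-packet interrupt is enabled and the clocks are left
              * running; otherwise every clock is gated to save power.
              */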
3423         netif_carrier_off(netdev);
3424         netif_device_detach(netdev);
3425 
3426         if (bp->wol & MACB_WOL_ENABLED) {
3427                 macb_writel(bp, IER, MACB_BIT(WOL));
3428                 macb_writel(bp, WOL, MACB_BIT(MAG));
3429                 enable_irq_wake(bp->queues[0].irq);
3430         } else {
3431                 clk_disable_unprepare(bp->tx_clk);
3432                 clk_disable_unprepare(bp->hclk);
3433                 clk_disable_unprepare(bp->pclk);
3434                 clk_disable_unprepare(bp->rx_clk);
3435         }
3436 
3437         return 0;
3438 }
3439 
3440 static int __maybe_unused macb_resume(struct device *dev)
3441 {
3442         struct platform_device *pdev = to_platform_device(dev);
3443         struct net_device *netdev = platform_get_drvdata(pdev);
3444         struct macb *bp = netdev_priv(netdev);
3445 
3446         if (bp->wol & MACB_WOL_ENABLED) {
3447                 macb_writel(bp, IDR, MACB_BIT(WOL));
3448                 macb_writel(bp, WOL, 0);
3449                 disable_irq_wake(bp->queues[0].irq);
3450         } else {
3451                 clk_prepare_enable(bp->pclk);
3452                 clk_prepare_enable(bp->hclk);
3453                 clk_prepare_enable(bp->tx_clk);
3454                 clk_prepare_enable(bp->rx_clk);
3455         }
3456 
3457         netif_device_attach(netdev);
3458 
3459         return 0;
3460 }
3461 
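     /* SIMPLE_DEV_PM_OPS only populates the system-sleep hooks; when
      * CONFIG_PM_SLEEP is off it discards them, which is why the callbacks
      * above are tagged __maybe_unused.
      */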
3462 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
3463 
3464 static struct platform_driver macb_driver = {
3465         .probe          = macb_probe,
3466         .remove         = macb_remove,
3467         .driver         = {
3468                 .name           = "macb",
3469                 .of_match_table = of_match_ptr(macb_dt_ids),
3470                 .pm     = &macb_pm_ops,
3471         },
3472 };
3473 
3474 module_platform_driver(macb_driver);
3475 
3476 MODULE_LICENSE("GPL");
3477 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
3478 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
3479 MODULE_ALIAS("platform:macb");
3480 
