Linux/drivers/net/ethernet/altera/altera_tse_main.c

  1 /* Altera Triple-Speed Ethernet MAC driver
  2  * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
  3  *
  4  * Contributors:
  5  *   Dalon Westergreen
  6  *   Thomas Chou
  7  *   Ian Abbott
  8  *   Yuriy Kozlov
  9  *   Tobias Klauser
 10  *   Andriy Smolskyy
 11  *   Roman Bulgakov
 12  *   Dmytro Mytarchuk
 13  *   Matthew Gerlach
 14  *
 15  * Original driver contributed by SLS.
 16  * Major updates contributed by GlobalLogic
 17  *
 18  * This program is free software; you can redistribute it and/or modify it
 19  * under the terms and conditions of the GNU General Public License,
 20  * version 2, as published by the Free Software Foundation.
 21  *
 22  * This program is distributed in the hope it will be useful, but WITHOUT
 23  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 24  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 25  * more details.
 26  *
 27  * You should have received a copy of the GNU General Public License along with
 28  * this program.  If not, see <http://www.gnu.org/licenses/>.
 29  */
 30 
 31 #include <linux/atomic.h>
 32 #include <linux/delay.h>
 33 #include <linux/etherdevice.h>
 34 #include <linux/if_vlan.h>
 35 #include <linux/init.h>
 36 #include <linux/interrupt.h>
 37 #include <linux/io.h>
 38 #include <linux/kernel.h>
 39 #include <linux/module.h>
 40 #include <linux/netdevice.h>
 41 #include <linux/of_device.h>
 42 #include <linux/of_mdio.h>
 43 #include <linux/of_net.h>
 44 #include <linux/of_platform.h>
 45 #include <linux/phy.h>
 46 #include <linux/platform_device.h>
 47 #include <linux/skbuff.h>
 48 #include <asm/cacheflush.h>
 49 
 50 #include "altera_utils.h"
 51 #include "altera_tse.h"
 52 #include "altera_sgdma.h"
 53 #include "altera_msgdma.h"
 54 
 55 static atomic_t instance_count = ATOMIC_INIT(~0);
 56 /* Module parameters */
 57 static int debug = -1;
 58 module_param(debug, int, S_IRUGO | S_IWUSR);
 59 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
 60 
 61 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 62                                         NETIF_MSG_LINK | NETIF_MSG_IFUP |
 63                                         NETIF_MSG_IFDOWN);
 64 
 65 #define RX_DESCRIPTORS 64
 66 static int dma_rx_num = RX_DESCRIPTORS;
 67 module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
 68 MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
 69 
 70 #define TX_DESCRIPTORS 64
 71 static int dma_tx_num = TX_DESCRIPTORS;
 72 module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
 73 MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
 74 
 75 
 76 #define POLL_PHY (-1)
 77 
  78 /* Make sure the DMA buffer size is larger than the max frame size
  79  * plus some alignment offset and a VLAN header. With a max frame
  80  * size of 1518, a VLAN header adds 4 bytes and the alignment
  81  * headroom adds 2 bytes, so 2048 is ample.
  82  */
 83 #define ALTERA_RXDMABUFFER_SIZE 2048
 84 
 85 /* Allow network stack to resume queueing packets after we've
 86  * finished transmitting at least 1/4 of the packets in the queue.
 87  */
 88 #define TSE_TX_THRESH(x)        (x->tx_ring_size / 4)
 89 
 90 #define TXQUEUESTOP_THRESHHOLD  2
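
/* These two thresholds act as hysteresis: tse_start_xmit() stops the
 * queue when no more than TXQUEUESTOP_THRESHHOLD free descriptors
 * remain, and tse_tx_complete() only wakes it again once more than a
 * quarter of the ring (TSE_TX_THRESH) is free, so the queue does not
 * bounce on every reclaimed descriptor.
 */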
 91 
 92 static const struct of_device_id altera_tse_ids[];
 93 
 94 static inline u32 tse_tx_avail(struct altera_tse_private *priv)
 95 {
 96         return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
 97 }
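
/* Example of the ring arithmetic above (illustrative values): with a
 * 64-entry ring, tx_prod = 70 and tx_cons = 10, tse_tx_avail() returns
 * 10 + 64 - 70 - 1 = 3 free slots. The producer/consumer counters only
 * grow, and are reduced modulo tx_ring_size when indexing the ring, so
 * the subtraction stays correct across wraparound.
 */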
 98 
 99 /* MDIO specific functions
100  */
101 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
102 {
103         struct net_device *ndev = bus->priv;
104         struct altera_tse_private *priv = netdev_priv(ndev);
105 
106         /* set MDIO address */
107         csrwr32((mii_id & 0x1f), priv->mac_dev,
108                 tse_csroffs(mdio_phy1_addr));
109 
110         /* get the data */
111         return csrrd32(priv->mac_dev,
112                        tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
113 }
114 
115 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
116                                  u16 value)
117 {
118         struct net_device *ndev = bus->priv;
119         struct altera_tse_private *priv = netdev_priv(ndev);
120 
121         /* set MDIO address */
122         csrwr32((mii_id & 0x1f), priv->mac_dev,
123                 tse_csroffs(mdio_phy1_addr));
124 
125         /* write the data */
126         csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
127         return 0;
128 }
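
/* The TSE MAC exposes MDIO through a memory-mapped window: writing a
 * PHY address to mdio_phy1_addr selects the device, after which each
 * of its 32 MII registers appears at tse_csroffs(mdio_phy1) +
 * regnum * 4, and a CSR access to that word carries out the serial
 * MDIO transaction in hardware. As an illustrative example,
 * altera_tse_mdio_read(bus, 4, MII_BMSR) selects PHY 4 and returns
 * the low 16 bits of the word at offset mdio_phy1 + 1 * 4 as the
 * basic mode status register.
 */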
129 
130 static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
131 {
132         struct altera_tse_private *priv = netdev_priv(dev);
133         int ret;
134         int i;
135         struct device_node *mdio_node = NULL;
136         struct mii_bus *mdio = NULL;
137         struct device_node *child_node = NULL;
138 
139         for_each_child_of_node(priv->device->of_node, child_node) {
140                 if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
141                         mdio_node = child_node;
142                         break;
143                 }
144         }
145 
146         if (mdio_node) {
147                 netdev_dbg(dev, "FOUND MDIO subnode\n");
148         } else {
149                 netdev_dbg(dev, "NO MDIO subnode\n");
150                 return 0;
151         }
152 
153         mdio = mdiobus_alloc();
154         if (mdio == NULL) {
155                 netdev_err(dev, "Error allocating MDIO bus\n");
156                 return -ENOMEM;
157         }
158 
159         mdio->name = ALTERA_TSE_RESOURCE_NAME;
160         mdio->read = &altera_tse_mdio_read;
161         mdio->write = &altera_tse_mdio_write;
162         snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
163 
164         mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
165         if (mdio->irq == NULL) {
166                 ret = -ENOMEM;
167                 goto out_free_mdio;
168         }
169         for (i = 0; i < PHY_MAX_ADDR; i++)
170                 mdio->irq[i] = PHY_POLL;
171 
172         mdio->priv = dev;
173         mdio->parent = priv->device;
174 
175         ret = of_mdiobus_register(mdio, mdio_node);
176         if (ret != 0) {
177                 netdev_err(dev, "Cannot register MDIO bus %s\n",
178                            mdio->id);
179                 goto out_free_mdio_irq;
180         }
181 
182         if (netif_msg_drv(priv))
183                 netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
184 
185         priv->mdio = mdio;
186         return 0;
187 out_free_mdio_irq:
188         kfree(mdio->irq);
189 out_free_mdio:
190         mdiobus_free(mdio);
191         mdio = NULL;
192         return ret;
193 }
194 
195 static void altera_tse_mdio_destroy(struct net_device *dev)
196 {
197         struct altera_tse_private *priv = netdev_priv(dev);
198 
199         if (priv->mdio == NULL)
200                 return;
201 
202         if (netif_msg_drv(priv))
203                 netdev_info(dev, "MDIO bus %s: removed\n",
204                             priv->mdio->id);
205 
206         mdiobus_unregister(priv->mdio);
207         kfree(priv->mdio->irq);
208         mdiobus_free(priv->mdio);
209         priv->mdio = NULL;
210 }
211 
212 static int tse_init_rx_buffer(struct altera_tse_private *priv,
213                               struct tse_buffer *rxbuffer, int len)
214 {
215         rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
216         if (!rxbuffer->skb)
217                 return -ENOMEM;
218 
219         rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
220                                                 len,
221                                                 DMA_FROM_DEVICE);
222 
223         if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
224                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
225                 dev_kfree_skb_any(rxbuffer->skb);
226                 return -EINVAL;
227         }
228         rxbuffer->dma_addr &= (dma_addr_t)~3;
229         rxbuffer->len = len;
230         return 0;
231 }
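
/* A note on the masking above: netdev_alloc_skb_ip_align() reserves
 * NET_IP_ALIGN (2) bytes of headroom, so skb->data sits 2 bytes past
 * a 32-bit boundary. Clearing the low two address bits points the DMA
 * engine back at that boundary; the MAC's RX "shift 16" option
 * (enabled in init_mac() below) then places the frame 2 bytes into
 * the buffer, so the IP header still ends up 32-bit aligned.
 */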
232 
233 static void tse_free_rx_buffer(struct altera_tse_private *priv,
234                                struct tse_buffer *rxbuffer)
235 {
236         struct sk_buff *skb = rxbuffer->skb;
237         dma_addr_t dma_addr = rxbuffer->dma_addr;
238 
239         if (skb != NULL) {
240                 if (dma_addr)
241                         dma_unmap_single(priv->device, dma_addr,
242                                          rxbuffer->len,
243                                          DMA_FROM_DEVICE);
244                 dev_kfree_skb_any(skb);
245                 rxbuffer->skb = NULL;
246                 rxbuffer->dma_addr = 0;
247         }
248 }
249 
250 /* Unmap and free Tx buffer resources
251  */
252 static void tse_free_tx_buffer(struct altera_tse_private *priv,
253                                struct tse_buffer *buffer)
254 {
255         if (buffer->dma_addr) {
256                 if (buffer->mapped_as_page)
257                         dma_unmap_page(priv->device, buffer->dma_addr,
258                                        buffer->len, DMA_TO_DEVICE);
259                 else
260                         dma_unmap_single(priv->device, buffer->dma_addr,
261                                          buffer->len, DMA_TO_DEVICE);
262                 buffer->dma_addr = 0;
263         }
264         if (buffer->skb) {
265                 dev_kfree_skb_any(buffer->skb);
266                 buffer->skb = NULL;
267         }
268 }
269 
270 static int alloc_init_skbufs(struct altera_tse_private *priv)
271 {
272         unsigned int rx_descs = priv->rx_ring_size;
273         unsigned int tx_descs = priv->tx_ring_size;
274         int ret = -ENOMEM;
275         int i;
276 
277         /* Create Rx ring buffer */
278         priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
279                                 GFP_KERNEL);
280         if (!priv->rx_ring)
281                 goto err_rx_ring;
282 
283         /* Create Tx ring buffer */
284         priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
285                                 GFP_KERNEL);
286         if (!priv->tx_ring)
287                 goto err_tx_ring;
288 
289         priv->tx_cons = 0;
290         priv->tx_prod = 0;
291 
292         /* Init Rx ring */
293         for (i = 0; i < rx_descs; i++) {
294                 ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
295                                          priv->rx_dma_buf_sz);
296                 if (ret)
297                         goto err_init_rx_buffers;
298         }
299 
300         priv->rx_cons = 0;
301         priv->rx_prod = 0;
302 
303         return 0;
304 err_init_rx_buffers:
305         while (--i >= 0)
306                 tse_free_rx_buffer(priv, &priv->rx_ring[i]);
307         kfree(priv->tx_ring);
308 err_tx_ring:
309         kfree(priv->rx_ring);
310 err_rx_ring:
311         return ret;
312 }
313 
314 static void free_skbufs(struct net_device *dev)
315 {
316         struct altera_tse_private *priv = netdev_priv(dev);
317         unsigned int rx_descs = priv->rx_ring_size;
318         unsigned int tx_descs = priv->tx_ring_size;
319         int i;
320 
321         /* Release the DMA TX/RX socket buffers */
322         for (i = 0; i < rx_descs; i++)
323                 tse_free_rx_buffer(priv, &priv->rx_ring[i]);
324         for (i = 0; i < tx_descs; i++)
325                 tse_free_tx_buffer(priv, &priv->tx_ring[i]);
326 
327 
328         kfree(priv->tx_ring);
329 }
330 
331 /* Reallocate the skb for the reception process
332  */
333 static inline void tse_rx_refill(struct altera_tse_private *priv)
334 {
335         unsigned int rxsize = priv->rx_ring_size;
336         unsigned int entry;
337         int ret;
338 
339         for (; priv->rx_cons - priv->rx_prod > 0;
340                         priv->rx_prod++) {
341                 entry = priv->rx_prod % rxsize;
342                 if (likely(priv->rx_ring[entry].skb == NULL)) {
343                         ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
344                                 priv->rx_dma_buf_sz);
345                         if (unlikely(ret != 0))
346                                 break;
347                         priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
348                 }
349         }
350 }
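
/* In the refill loop above, rx_cons counts buffers handed up to the
 * stack and rx_prod counts buffers handed back to the DMA engine, so
 * rx_cons - rx_prod is exactly the number of ring slots currently
 * missing an skb.
 */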
351 
352 /* Pull out the VLAN tag and fix up the packet
353  */
354 static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
355 {
356         struct ethhdr *eth_hdr;
357         u16 vid;
358         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
359             !__vlan_get_tag(skb, &vid)) {
360                 eth_hdr = (struct ethhdr *)skb->data;
361                 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
362                 skb_pull(skb, VLAN_HLEN);
363                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
364         }
365 }
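
/* Frame layout around the fix-up above (illustrative): on the wire a
 * tagged frame reads dst(6) src(6) 0x8100 TCI(2) type(2) payload...;
 * moving the twelve address bytes up by VLAN_HLEN (4) and pulling
 * 4 bytes leaves an untagged dst(6) src(6) type(2) payload..., while
 * the VLAN ID from the TCI travels out-of-band in the skb via
 * __vlan_hwaccel_put_tag().
 */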
366 
367 /* Receive a packet: retrieve and pass over to upper levels
368  */
369 static int tse_rx(struct altera_tse_private *priv, int limit)
370 {
371         unsigned int count = 0;
372         unsigned int next_entry;
373         struct sk_buff *skb;
374         unsigned int entry = priv->rx_cons % priv->rx_ring_size;
375         u32 rxstatus;
376         u16 pktlength;
377         u16 pktstatus;
378 
 379         /* Check count < limit first: get_rx_status() modifies the
 380          * response FIFO (reading the last byte of a response pops it
 381          * from the FIFO), so once a pending response has been read,
 382          * the corresponding packet must be processed.
 383          */
384         while ((count < limit) &&
385                ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
386                 pktstatus = rxstatus >> 16;
387                 pktlength = rxstatus & 0xffff;
388 
389                 if ((pktstatus & 0xFF) || (pktlength == 0))
390                         netdev_err(priv->dev,
391                                    "RCV pktstatus %08X pktlength %08X\n",
392                                    pktstatus, pktlength);
393 
 394                 /* The DMA transfer from the TSE starts with 2 additional
 395                  * bytes for IP payload alignment. get_rx_status() returns
 396                  * the DMA transfer length, so the packet is 2 bytes shorter.
 397                  */
398                 pktlength -= 2;
399 
400                 count++;
401                 next_entry = (++priv->rx_cons) % priv->rx_ring_size;
402 
403                 skb = priv->rx_ring[entry].skb;
404                 if (unlikely(!skb)) {
405                         netdev_err(priv->dev,
406                                    "%s: Inconsistent Rx descriptor chain\n",
407                                    __func__);
408                         priv->dev->stats.rx_dropped++;
409                         break;
410                 }
411                 priv->rx_ring[entry].skb = NULL;
412 
413                 skb_put(skb, pktlength);
414 
415                 /* make cache consistent with receive packet buffer */
416                 dma_sync_single_for_cpu(priv->device,
417                                         priv->rx_ring[entry].dma_addr,
418                                         priv->rx_ring[entry].len,
419                                         DMA_FROM_DEVICE);
420 
421                 dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
422                                  priv->rx_ring[entry].len, DMA_FROM_DEVICE);
423 
424                 if (netif_msg_pktdata(priv)) {
425                         netdev_info(priv->dev, "frame received %d bytes\n",
426                                     pktlength);
427                         print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
428                                        16, 1, skb->data, pktlength, true);
429                 }
430 
431                 tse_rx_vlan(priv->dev, skb);
432 
433                 skb->protocol = eth_type_trans(skb, priv->dev);
434                 skb_checksum_none_assert(skb);
435 
436                 napi_gro_receive(&priv->napi, skb);
437 
438                 priv->dev->stats.rx_packets++;
439                 priv->dev->stats.rx_bytes += pktlength;
440 
441                 entry = next_entry;
442 
443                 tse_rx_refill(priv);
444         }
445 
446         return count;
447 }
448 
449 /* Reclaim resources after transmission completes
450  */
451 static int tse_tx_complete(struct altera_tse_private *priv)
452 {
453         unsigned int txsize = priv->tx_ring_size;
454         u32 ready;
455         unsigned int entry;
456         struct tse_buffer *tx_buff;
457         int txcomplete = 0;
458 
459         spin_lock(&priv->tx_lock);
460 
461         ready = priv->dmaops->tx_completions(priv);
462 
463         /* Free sent buffers */
464         while (ready && (priv->tx_cons != priv->tx_prod)) {
465                 entry = priv->tx_cons % txsize;
466                 tx_buff = &priv->tx_ring[entry];
467 
468                 if (netif_msg_tx_done(priv))
469                         netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
470                                    __func__, priv->tx_prod, priv->tx_cons);
471 
472                 if (likely(tx_buff->skb))
473                         priv->dev->stats.tx_packets++;
474 
475                 tse_free_tx_buffer(priv, tx_buff);
476                 priv->tx_cons++;
477 
478                 txcomplete++;
479                 ready--;
480         }
481 
482         if (unlikely(netif_queue_stopped(priv->dev) &&
483                      tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
484                 netif_tx_lock(priv->dev);
485                 if (netif_queue_stopped(priv->dev) &&
486                     tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
487                         if (netif_msg_tx_done(priv))
488                                 netdev_dbg(priv->dev, "%s: restart transmit\n",
489                                            __func__);
490                         netif_wake_queue(priv->dev);
491                 }
492                 netif_tx_unlock(priv->dev);
493         }
494 
495         spin_unlock(&priv->tx_lock);
496         return txcomplete;
497 }
498 
499 /* NAPI polling function
500  */
501 static int tse_poll(struct napi_struct *napi, int budget)
502 {
503         struct altera_tse_private *priv =
504                         container_of(napi, struct altera_tse_private, napi);
505         int rxcomplete = 0;
 506         unsigned long flags;
507 
508         tse_tx_complete(priv);
509 
510         rxcomplete = tse_rx(priv, budget);
511 
512         if (rxcomplete < budget) {
513 
514                 napi_complete(napi);
515 
516                 netdev_dbg(priv->dev,
517                            "NAPI Complete, did %d packets with budget %d\n",
518                            rxcomplete, budget);
519 
520                 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
521                 priv->dmaops->enable_rxirq(priv);
522                 priv->dmaops->enable_txirq(priv);
523                 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
524         }
525         return rxcomplete;
526 }
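
/* tse_poll() follows the usual NAPI contract: RX/TX interrupts stay
 * disabled while polling, and only when fewer packets than the budget
 * were processed does it call napi_complete() and re-enable the IRQs,
 * handing interrupt duty back to altera_isr() below.
 */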
527 
 528 /* DMA TX & RX FIFO interrupt routine: both IRQ lines are routed here
 529  */
530 static irqreturn_t altera_isr(int irq, void *dev_id)
531 {
532         struct net_device *dev = dev_id;
533         struct altera_tse_private *priv;
534 
535         if (unlikely(!dev)) {
536                 pr_err("%s: invalid dev pointer\n", __func__);
537                 return IRQ_NONE;
538         }
539         priv = netdev_priv(dev);
540 
541         spin_lock(&priv->rxdma_irq_lock);
542         /* reset IRQs */
543         priv->dmaops->clear_rxirq(priv);
544         priv->dmaops->clear_txirq(priv);
545         spin_unlock(&priv->rxdma_irq_lock);
546 
547         if (likely(napi_schedule_prep(&priv->napi))) {
548                 spin_lock(&priv->rxdma_irq_lock);
549                 priv->dmaops->disable_rxirq(priv);
550                 priv->dmaops->disable_txirq(priv);
551                 spin_unlock(&priv->rxdma_irq_lock);
552                 __napi_schedule(&priv->napi);
553         }
554 
555 
556         return IRQ_HANDLED;
557 }
558 
 559 /* Transmit a packet (called by the kernel). Dispatches to
 560  * either the SGDMA method or the MSGDMA method for
 561  * transmitting. Both assume no scatter/gather support,
 562  * implying that there is only one physically contiguous
 563  * fragment, starting at skb->data, with a length of
 564  * skb_headlen(skb).
 565  */
566 static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
567 {
568         struct altera_tse_private *priv = netdev_priv(dev);
569         unsigned int txsize = priv->tx_ring_size;
570         unsigned int entry;
571         struct tse_buffer *buffer = NULL;
572         int nfrags = skb_shinfo(skb)->nr_frags;
573         unsigned int nopaged_len = skb_headlen(skb);
574         enum netdev_tx ret = NETDEV_TX_OK;
575         dma_addr_t dma_addr;
576 
577         spin_lock_bh(&priv->tx_lock);
578 
579         if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
580                 if (!netif_queue_stopped(dev)) {
581                         netif_stop_queue(dev);
582                         /* This is a hard error, log it. */
583                         netdev_err(priv->dev,
584                                    "%s: Tx list full when queue awake\n",
585                                    __func__);
586                 }
587                 ret = NETDEV_TX_BUSY;
588                 goto out;
589         }
590 
591         /* Map the first skb fragment */
592         entry = priv->tx_prod % txsize;
593         buffer = &priv->tx_ring[entry];
594 
595         dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
596                                   DMA_TO_DEVICE);
597         if (dma_mapping_error(priv->device, dma_addr)) {
598                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
599                 ret = NETDEV_TX_OK;
600                 goto out;
601         }
602 
603         buffer->skb = skb;
604         buffer->dma_addr = dma_addr;
605         buffer->len = nopaged_len;
606 
607         /* Push data out of the cache hierarchy into main memory */
608         dma_sync_single_for_device(priv->device, buffer->dma_addr,
609                                    buffer->len, DMA_TO_DEVICE);
610 
611         priv->dmaops->tx_buffer(priv, buffer);
612 
613         skb_tx_timestamp(skb);
614 
615         priv->tx_prod++;
616         dev->stats.tx_bytes += skb->len;
617 
618         if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
619                 if (netif_msg_hw(priv))
620                         netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
621                                    __func__);
622                 netif_stop_queue(dev);
623         }
624 
625 out:
626         spin_unlock_bh(&priv->tx_lock);
627 
628         return ret;
629 }
630 
 631 /* Called every time the controller might need to be made
 632  * aware of a new link state. The PHY code conveys this
 633  * information through variables in the phydev structure; this
 634  * function converts those variables into the appropriate
 635  * register values, and can bring down the device if needed.
 636  */
637 static void altera_tse_adjust_link(struct net_device *dev)
638 {
639         struct altera_tse_private *priv = netdev_priv(dev);
640         struct phy_device *phydev = priv->phydev;
641         int new_state = 0;
642 
643         /* only change config if there is a link */
644         spin_lock(&priv->mac_cfg_lock);
645         if (phydev->link) {
646                 /* Read old config */
647                 u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
648 
649                 /* Check duplex */
650                 if (phydev->duplex != priv->oldduplex) {
651                         new_state = 1;
652                         if (!(phydev->duplex))
653                                 cfg_reg |= MAC_CMDCFG_HD_ENA;
654                         else
655                                 cfg_reg &= ~MAC_CMDCFG_HD_ENA;
656 
657                         netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
658                                    dev->name, phydev->duplex);
659 
660                         priv->oldduplex = phydev->duplex;
661                 }
662 
663                 /* Check speed */
664                 if (phydev->speed != priv->oldspeed) {
665                         new_state = 1;
666                         switch (phydev->speed) {
667                         case 1000:
668                                 cfg_reg |= MAC_CMDCFG_ETH_SPEED;
669                                 cfg_reg &= ~MAC_CMDCFG_ENA_10;
670                                 break;
671                         case 100:
672                                 cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
673                                 cfg_reg &= ~MAC_CMDCFG_ENA_10;
674                                 break;
675                         case 10:
676                                 cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
677                                 cfg_reg |= MAC_CMDCFG_ENA_10;
678                                 break;
679                         default:
680                                 if (netif_msg_link(priv))
681                                         netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
682                                                     phydev->speed);
683                                 break;
684                         }
685                         priv->oldspeed = phydev->speed;
686                 }
687                 iowrite32(cfg_reg, &priv->mac_dev->command_config);
688 
689                 if (!priv->oldlink) {
690                         new_state = 1;
691                         priv->oldlink = 1;
692                 }
693         } else if (priv->oldlink) {
694                 new_state = 1;
695                 priv->oldlink = 0;
696                 priv->oldspeed = 0;
697                 priv->oldduplex = -1;
698         }
699 
700         if (new_state && netif_msg_link(priv))
701                 phy_print_status(phydev);
702 
703         spin_unlock(&priv->mac_cfg_lock);
704 }
705 static struct phy_device *connect_local_phy(struct net_device *dev)
706 {
707         struct altera_tse_private *priv = netdev_priv(dev);
708         struct phy_device *phydev = NULL;
709         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
710 
711         if (priv->phy_addr != POLL_PHY) {
712                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
713                          priv->mdio->id, priv->phy_addr);
714 
715                 netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
716 
717                 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
718                                      priv->phy_iface);
719                 if (IS_ERR(phydev))
720                         netdev_err(dev, "Could not attach to PHY\n");
721 
722         } else {
723                 int ret;
724                 phydev = phy_find_first(priv->mdio);
725                 if (phydev == NULL) {
726                         netdev_err(dev, "No PHY found\n");
727                         return phydev;
728                 }
729 
730                 ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
731                                 priv->phy_iface);
732                 if (ret != 0) {
733                         netdev_err(dev, "Could not attach to PHY\n");
734                         phydev = NULL;
735                 }
736         }
737         return phydev;
738 }
739 
740 static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
741 {
742         struct altera_tse_private *priv = netdev_priv(dev);
743         struct device_node *np = priv->device->of_node;
744         int ret = 0;
745 
746         priv->phy_iface = of_get_phy_mode(np);
747 
 748         /* Skip PHY address lookup and MDIO creation if no PHY is present */
749         if (!priv->phy_iface)
750                 return 0;
751 
752         /* try to get PHY address from device tree, use PHY autodetection if
753          * no valid address is given
754          */
755 
756         if (of_property_read_u32(priv->device->of_node, "phy-addr",
757                          &priv->phy_addr)) {
758                 priv->phy_addr = POLL_PHY;
759         }
760 
761         if (!((priv->phy_addr == POLL_PHY) ||
762                   ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
763                 netdev_err(dev, "invalid phy-addr specified %d\n",
764                         priv->phy_addr);
765                 return -ENODEV;
766         }
767 
768         /* Create/attach to MDIO bus */
769         ret = altera_tse_mdio_create(dev,
770                                          atomic_add_return(1, &instance_count));
771 
772         if (ret)
773                 return -ENODEV;
774 
775         return 0;
776 }
777 
778 /* Initialize driver's PHY state, and attach to the PHY
779  */
780 static int init_phy(struct net_device *dev)
781 {
782         struct altera_tse_private *priv = netdev_priv(dev);
783         struct phy_device *phydev;
784         struct device_node *phynode;
785         bool fixed_link = false;
786         int rc = 0;
787 
 788         /* Skip PHY init when no PHY is present */
789         if (!priv->phy_iface)
790                 return 0;
791 
792         priv->oldlink = 0;
793         priv->oldspeed = 0;
794         priv->oldduplex = -1;
795 
796         phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
797 
798         if (!phynode) {
799                 /* check if a fixed-link is defined in device-tree */
800                 if (of_phy_is_fixed_link(priv->device->of_node)) {
801                         rc = of_phy_register_fixed_link(priv->device->of_node);
802                         if (rc < 0) {
803                                 netdev_err(dev, "cannot register fixed PHY\n");
804                                 return rc;
805                         }
806 
807                         /* In the case of a fixed PHY, the DT node associated
808                          * to the PHY is the Ethernet MAC DT node.
809                          */
810                         phynode = of_node_get(priv->device->of_node);
811                         fixed_link = true;
812 
813                         netdev_dbg(dev, "fixed-link detected\n");
814                         phydev = of_phy_connect(dev, phynode,
815                                                 &altera_tse_adjust_link,
816                                                 0, priv->phy_iface);
817                 } else {
818                         netdev_dbg(dev, "no phy-handle found\n");
819                         if (!priv->mdio) {
820                                 netdev_err(dev, "No phy-handle nor local mdio specified\n");
821                                 return -ENODEV;
822                         }
823                         phydev = connect_local_phy(dev);
824                 }
825         } else {
826                 netdev_dbg(dev, "phy-handle found\n");
827                 phydev = of_phy_connect(dev, phynode,
828                         &altera_tse_adjust_link, 0, priv->phy_iface);
829         }
830 
831         if (!phydev) {
832                 netdev_err(dev, "Could not find the PHY\n");
833                 return -ENODEV;
834         }
835 
 836         /* Stop advertising 1000BASE-T capability if the interface is
 837          * not GMII. Note: checkpatch throws CHECKs for the CamelCase
 838          * defines below; it's OK to ignore them.
 839          */
840         if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
841             (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
842                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
843                                          SUPPORTED_1000baseT_Full);
844 
845         /* Broken HW is sometimes missing the pull-up resistor on the
846          * MDIO line, which results in reads to non-existent devices returning
847          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
848          * device as well. If a fixed-link is used the phy_id is always 0.
849          * Note: phydev->phy_id is the result of reading the UID PHY registers.
850          */
851         if ((phydev->phy_id == 0) && !fixed_link) {
852                 netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
853                 phy_disconnect(phydev);
854                 return -ENODEV;
855         }
856 
857         netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
858                    phydev->addr, phydev->phy_id, phydev->link);
859 
860         priv->phydev = phydev;
861         return 0;
862 }
863 
864 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
865 {
866         u32 msb;
867         u32 lsb;
868 
869         msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
870         lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
871 
872         /* Set primary MAC address */
873         csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
874         csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
875 }
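
/* Example packing (illustrative): for the MAC address
 * 02:03:04:05:06:07, addr[0..5] = {0x02, 0x03, 0x04, 0x05, 0x06,
 * 0x07}, so mac_addr_0 is written with 0x05040302 and mac_addr_1 with
 * 0x00000706: both registers hold the address bytes in little-endian
 * order, with the two high bytes of mac_addr_1 unused.
 */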
876 
877 /* MAC software reset.
878  * When reset is triggered, the MAC function completes the current
879  * transmission or reception, and subsequently disables the transmit and
880  * receive logic, flushes the receive FIFO buffer, and resets the statistics
881  * counters.
882  */
883 static int reset_mac(struct altera_tse_private *priv)
884 {
885         int counter;
886         u32 dat;
887 
888         dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
889         dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
890         dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
891         csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
892 
893         counter = 0;
894         while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
895                 if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
896                                      MAC_CMDCFG_SW_RESET))
897                         break;
898                 udelay(1);
899         }
900 
901         if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
902                 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
903                 dat &= ~MAC_CMDCFG_SW_RESET;
904                 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
905                 return -1;
906         }
907         return 0;
908 }
909 
910 /* Initialize MAC core registers
 911  */
912 static int init_mac(struct altera_tse_private *priv)
913 {
914         unsigned int cmd = 0;
915         u32 frm_length;
916 
917         /* Setup Rx FIFO */
918         csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
919                 priv->mac_dev, tse_csroffs(rx_section_empty));
920 
921         csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
922                 tse_csroffs(rx_section_full));
923 
924         csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
925                 tse_csroffs(rx_almost_empty));
926 
927         csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
928                 tse_csroffs(rx_almost_full));
929 
930         /* Setup Tx FIFO */
931         csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
932                 priv->mac_dev, tse_csroffs(tx_section_empty));
933 
934         csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
935                 tse_csroffs(tx_section_full));
936 
937         csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
938                 tse_csroffs(tx_almost_empty));
939 
940         csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
941                 tse_csroffs(tx_almost_full));
942 
943         /* MAC Address Configuration */
944         tse_update_mac_addr(priv, priv->dev->dev_addr);
945 
946         /* MAC Function Configuration */
947         frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
948         csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
949 
950         csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
951                 tse_csroffs(tx_ipg_length));
952 
 953         /* Enable RX shift 16 so received frames are aligned on a 16-bit
 954          * start address; TX shift 16 and CRC omission are disabled below.
 955          */
956         tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
957                     ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
958 
959         tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
960                       ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
961                       ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
962 
963         /* Set the MAC options */
964         cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
965         cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
966         cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
967         cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
968                                          * with CRC errors
969                                          */
970         cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
971         cmd &= ~MAC_CMDCFG_TX_ENA;
972         cmd &= ~MAC_CMDCFG_RX_ENA;
973 
974         /* Default speed and duplex setting, full/100 */
975         cmd &= ~MAC_CMDCFG_HD_ENA;
976         cmd &= ~MAC_CMDCFG_ETH_SPEED;
977         cmd &= ~MAC_CMDCFG_ENA_10;
978 
979         csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
980 
981         csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
982                 tse_csroffs(pause_quanta));
983 
984         if (netif_msg_hw(priv))
985                 dev_dbg(priv->device,
986                         "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
987 
988         return 0;
989 }
990 
991 /* Start/stop MAC transmission logic
992  */
993 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
994 {
995         u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
996 
997         if (enable)
998                 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
999         else
1000                 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
1001 
1002         csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
1003 }
1004 
1005 /* Change the MTU
1006  */
1007 static int tse_change_mtu(struct net_device *dev, int new_mtu)
1008 {
1009         struct altera_tse_private *priv = netdev_priv(dev);
1010         unsigned int max_mtu = priv->max_mtu;
1011         unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
1012 
1013         if (netif_running(dev)) {
1014                 netdev_err(dev, "must be stopped to change its MTU\n");
1015                 return -EBUSY;
1016         }
1017 
1018         if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
1019                 netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
1020                 return -EINVAL;
1021         }
1022 
1023         dev->mtu = new_mtu;
1024         netdev_update_features(dev);
1025 
1026         return 0;
1027 }
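
/* With the bounds above, the accepted MTU range runs from 64 bytes
 * (ETH_ZLEN + ETH_FCS_LEN = 60 + 4) up to priv->max_mtu, which is
 * ETH_DATA_LEN (1500) unless the device tree overrides it through
 * "max-frame-size" (see altera_tse_probe() below).
 */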
1028 
1029 static void altera_tse_set_mcfilter(struct net_device *dev)
1030 {
1031         struct altera_tse_private *priv = netdev_priv(dev);
1032         int i;
1033         struct netdev_hw_addr *ha;
1034 
1035         /* clear the hash filter */
1036         for (i = 0; i < 64; i++)
1037                 csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
1038 
1039         netdev_for_each_mc_addr(ha, dev) {
1040                 unsigned int hash = 0;
1041                 int mac_octet;
1042 
1043                 for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
1044                         unsigned char xor_bit = 0;
1045                         unsigned char octet = ha->addr[mac_octet];
1046                         unsigned int bitshift;
1047 
1048                         for (bitshift = 0; bitshift < 8; bitshift++)
1049                                 xor_bit ^= ((octet >> bitshift) & 0x01);
1050 
1051                         hash = (hash << 1) | xor_bit;
1052                 }
1053                 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
1054         }
1055 }
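
/* Worked example of the hash above (illustrative): for the multicast
 * address 01:00:5e:00:00:01, the per-octet parity bits, taken from
 * addr[5] down to addr[0], are 1 (0x01), 0, 0, 1 (0x5e has five bits
 * set), 0, 1, so hash = 0b100101 = 37 and hash_table entry 37 gets
 * set.
 */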
1056 
1057 
1058 static void altera_tse_set_mcfilterall(struct net_device *dev)
1059 {
1060         struct altera_tse_private *priv = netdev_priv(dev);
1061         int i;
1062 
1063         /* set the hash filter */
1064         for (i = 0; i < 64; i++)
1065                 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
1066 }
1067 
1068 /* Set or clear the multicast filter for this adaptor
1069  */
1070 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
1071 {
1072         struct altera_tse_private *priv = netdev_priv(dev);
1073 
1074         spin_lock(&priv->mac_cfg_lock);
1075 
1076         if (dev->flags & IFF_PROMISC)
1077                 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1078                             MAC_CMDCFG_PROMIS_EN);
1079 
1080         if (dev->flags & IFF_ALLMULTI)
1081                 altera_tse_set_mcfilterall(dev);
1082         else
1083                 altera_tse_set_mcfilter(dev);
1084 
1085         spin_unlock(&priv->mac_cfg_lock);
1086 }
1087 
1088 /* Set or clear the multicast filter for this adaptor
1089  */
1090 static void tse_set_rx_mode(struct net_device *dev)
1091 {
1092         struct altera_tse_private *priv = netdev_priv(dev);
1093 
1094         spin_lock(&priv->mac_cfg_lock);
1095 
1096         if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1097             !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1098                 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1099                             MAC_CMDCFG_PROMIS_EN);
1100         else
1101                 tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1102                               MAC_CMDCFG_PROMIS_EN);
1103 
1104         spin_unlock(&priv->mac_cfg_lock);
1105 }
1106 
1107 /* Open and initialize the interface
1108  */
1109 static int tse_open(struct net_device *dev)
1110 {
1111         struct altera_tse_private *priv = netdev_priv(dev);
1112         int ret = 0;
1113         int i;
1114         unsigned long flags;
1115 
1116         /* Reset and configure TSE MAC and probe associated PHY */
1117         ret = priv->dmaops->init_dma(priv);
1118         if (ret != 0) {
1119                 netdev_err(dev, "Cannot initialize DMA\n");
1120                 goto phy_error;
1121         }
1122 
1123         if (netif_msg_ifup(priv))
1124                 netdev_warn(dev, "device MAC address %pM\n",
1125                             dev->dev_addr);
1126 
1127         if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
1128                 netdev_warn(dev, "TSE revision %x\n", priv->revision);
1129 
1130         spin_lock(&priv->mac_cfg_lock);
1131         ret = reset_mac(priv);
1132         /* Note that reset_mac will fail if the clocks are gated by the PHY
1133          * due to the PHY being put into isolation or power down mode.
1134          * This is not an error if reset fails due to no clock.
1135          */
1136         if (ret)
1137                 netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1138 
1139         ret = init_mac(priv);
1140         spin_unlock(&priv->mac_cfg_lock);
1141         if (ret) {
1142                 netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
1143                 goto alloc_skbuf_error;
1144         }
1145 
1146         priv->dmaops->reset_dma(priv);
1147 
1148         /* Create and initialize the TX/RX descriptors chains. */
1149         priv->rx_ring_size = dma_rx_num;
1150         priv->tx_ring_size = dma_tx_num;
1151         ret = alloc_init_skbufs(priv);
1152         if (ret) {
1153                 netdev_err(dev, "DMA descriptors initialization failed\n");
1154                 goto alloc_skbuf_error;
1155         }
1156 
1157 
1158         /* Register RX interrupt */
1159         ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
1160                           dev->name, dev);
1161         if (ret) {
1162                 netdev_err(dev, "Unable to register RX interrupt %d\n",
1163                            priv->rx_irq);
1164                 goto init_error;
1165         }
1166 
1167         /* Register TX interrupt */
1168         ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
1169                           dev->name, dev);
1170         if (ret) {
1171                 netdev_err(dev, "Unable to register TX interrupt %d\n",
1172                            priv->tx_irq);
1173                 goto tx_request_irq_error;
1174         }
1175 
1176         /* Enable DMA interrupts */
1177         spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1178         priv->dmaops->enable_rxirq(priv);
1179         priv->dmaops->enable_txirq(priv);
1180 
1181         /* Setup RX descriptor chain */
1182         for (i = 0; i < priv->rx_ring_size; i++)
1183                 priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
1184 
1185         spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1186 
1187         if (priv->phydev)
1188                 phy_start(priv->phydev);
1189 
1190         napi_enable(&priv->napi);
1191         netif_start_queue(dev);
1192 
1193         priv->dmaops->start_rxdma(priv);
1194 
1195         /* Start MAC Rx/Tx */
1196         spin_lock(&priv->mac_cfg_lock);
1197         tse_set_mac(priv, true);
1198         spin_unlock(&priv->mac_cfg_lock);
1199 
1200         return 0;
1201 
1202 tx_request_irq_error:
1203         free_irq(priv->rx_irq, dev);
1204 init_error:
1205         free_skbufs(dev);
1206 alloc_skbuf_error:
1207 phy_error:
1208         return ret;
1209 }
1210 
1211 /* Stop TSE MAC interface and put the device in an inactive state
1212  */
1213 static int tse_shutdown(struct net_device *dev)
1214 {
1215         struct altera_tse_private *priv = netdev_priv(dev);
1216         int ret;
1217         unsigned long flags;
1218 
1219         /* Stop the PHY */
1220         if (priv->phydev)
1221                 phy_stop(priv->phydev);
1222 
1223         netif_stop_queue(dev);
1224         napi_disable(&priv->napi);
1225 
1226         /* Disable DMA interrupts */
1227         spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1228         priv->dmaops->disable_rxirq(priv);
1229         priv->dmaops->disable_txirq(priv);
1230         spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1231 
1232         /* Free the IRQ lines */
1233         free_irq(priv->rx_irq, dev);
1234         free_irq(priv->tx_irq, dev);
1235 
1236         /* disable and reset the MAC, empties fifo */
1237         spin_lock(&priv->mac_cfg_lock);
1238         spin_lock(&priv->tx_lock);
1239 
1240         ret = reset_mac(priv);
1241         /* Note that reset_mac will fail if the clocks are gated by the PHY
1242          * due to the PHY being put into isolation or power down mode.
1243          * This is not an error if reset fails due to no clock.
1244          */
1245         if (ret)
1246                 netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1247         priv->dmaops->reset_dma(priv);
1248         free_skbufs(dev);
1249 
1250         spin_unlock(&priv->tx_lock);
1251         spin_unlock(&priv->mac_cfg_lock);
1252 
1253         priv->dmaops->uninit_dma(priv);
1254 
1255         return 0;
1256 }
1257 
1258 static struct net_device_ops altera_tse_netdev_ops = {
1259         .ndo_open               = tse_open,
1260         .ndo_stop               = tse_shutdown,
1261         .ndo_start_xmit         = tse_start_xmit,
1262         .ndo_set_mac_address    = eth_mac_addr,
1263         .ndo_set_rx_mode        = tse_set_rx_mode,
1264         .ndo_change_mtu         = tse_change_mtu,
1265         .ndo_validate_addr      = eth_validate_addr,
1266 };
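
/* Note that, unlike in most drivers, this ops table is deliberately
 * not const: altera_tse_probe() below patches .ndo_set_rx_mode at
 * probe time, substituting tse_set_rx_mode_hashfilter when the
 * instance has the multicast hash filter enabled.
 */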
1267 
1268 static int request_and_map(struct platform_device *pdev, const char *name,
1269                            struct resource **res, void __iomem **ptr)
1270 {
1271         struct resource *region;
1272         struct device *device = &pdev->dev;
1273 
1274         *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1275         if (*res == NULL) {
1276                 dev_err(device, "resource %s not defined\n", name);
1277                 return -ENODEV;
1278         }
1279 
1280         region = devm_request_mem_region(device, (*res)->start,
1281                                          resource_size(*res), dev_name(device));
1282         if (region == NULL) {
1283                 dev_err(device, "unable to request %s\n", name);
1284                 return -EBUSY;
1285         }
1286 
1287         *ptr = devm_ioremap_nocache(device, region->start,
1288                                     resource_size(region));
1289         if (*ptr == NULL) {
1290                 dev_err(device, "ioremap_nocache of %s failed!", name);
1291                 return -ENOMEM;
1292         }
1293 
1294         return 0;
1295 }
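
/* request_and_map() bundles the usual platform-device triple: look up
 * a named MEM resource, reserve it with devm_request_mem_region() and
 * map it with devm_ioremap_nocache(). The devm_* variants are released
 * automatically when the device is unbound, which is why the error
 * paths in altera_tse_probe() below never unmap explicitly.
 */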
1296 
1297 /* Probe Altera TSE MAC device
1298  */
1299 static int altera_tse_probe(struct platform_device *pdev)
1300 {
1301         struct net_device *ndev;
1302         int ret = -ENODEV;
1303         struct resource *control_port;
1304         struct resource *dma_res;
1305         struct altera_tse_private *priv;
1306         const unsigned char *macaddr;
1307         void __iomem *descmap;
1308         const struct of_device_id *of_id = NULL;
1309 
1310         ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1311         if (!ndev) {
1312                 dev_err(&pdev->dev, "Could not allocate network device\n");
1313                 return -ENODEV;
1314         }
1315 
1316         SET_NETDEV_DEV(ndev, &pdev->dev);
1317 
1318         priv = netdev_priv(ndev);
1319         priv->device = &pdev->dev;
1320         priv->dev = ndev;
1321         priv->msg_enable = netif_msg_init(debug, default_msg_level);
1322 
1323         of_id = of_match_device(altera_tse_ids, &pdev->dev);
1324 
1325         if (of_id)
1326                 priv->dmaops = (struct altera_dmaops *)of_id->data;
1327 
1328 
1329         if (priv->dmaops &&
1330             priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1331                 /* Get the mapped address to the SGDMA descriptor memory */
1332                 ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1333                 if (ret)
1334                         goto err_free_netdev;
1335 
1336                 /* Start of that memory is for transmit descriptors */
1337                 priv->tx_dma_desc = descmap;
1338 
1339                 /* First half is for tx descriptors, other half for rx */
1340                 priv->txdescmem = resource_size(dma_res)/2;
1341 
1342                 priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1343 
1344                 priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1345                                                      priv->txdescmem));
1346                 priv->rxdescmem = resource_size(dma_res)/2;
1347                 priv->rxdescmem_busaddr = dma_res->start;
1348                 priv->rxdescmem_busaddr += priv->txdescmem;
1349 
1350                 if (upper_32_bits(priv->rxdescmem_busaddr)) {
1351                         dev_dbg(priv->device,
1352                                 "SGDMA bus addresses greater than 32-bits\n");
1353                         goto err_free_netdev;
1354                 }
1355                 if (upper_32_bits(priv->txdescmem_busaddr)) {
1356                         dev_dbg(priv->device,
1357                                 "SGDMA bus addresses greater than 32-bits\n");
1358                         goto err_free_netdev;
1359                 }
1360         } else if (priv->dmaops &&
1361                    priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1362                 ret = request_and_map(pdev, "rx_resp", &dma_res,
1363                                       &priv->rx_dma_resp);
1364                 if (ret)
1365                         goto err_free_netdev;
1366 
1367                 ret = request_and_map(pdev, "tx_desc", &dma_res,
1368                                       &priv->tx_dma_desc);
1369                 if (ret)
1370                         goto err_free_netdev;
1371 
1372                 priv->txdescmem = resource_size(dma_res);
1373                 priv->txdescmem_busaddr = dma_res->start;
1374 
1375                 ret = request_and_map(pdev, "rx_desc", &dma_res,
1376                                       &priv->rx_dma_desc);
1377                 if (ret)
1378                         goto err_free_netdev;
1379 
1380                 priv->rxdescmem = resource_size(dma_res);
1381                 priv->rxdescmem_busaddr = dma_res->start;
1382 
1383         } else {
1384                 goto err_free_netdev;
1385         }
1386 
1387         if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
1388                 dma_set_coherent_mask(priv->device,
1389                                       DMA_BIT_MASK(priv->dmaops->dmamask));
1390         else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1391                 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1392         else
1393                 goto err_free_netdev;
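
        /* DMA mask selection above: first try the mask native to the
         * DMA type (64-bit for mSGDMA, 32-bit for SGDMA, per the
         * dmaops tables near the end of this file), then fall back to
         * 32 bits before giving up on the device.
         */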
1394 
1395         /* MAC address space */
1396         ret = request_and_map(pdev, "control_port", &control_port,
1397                               (void __iomem **)&priv->mac_dev);
1398         if (ret)
1399                 goto err_free_netdev;
1400 
1401         /* xSGDMA Rx Dispatcher address space */
1402         ret = request_and_map(pdev, "rx_csr", &dma_res,
1403                               &priv->rx_dma_csr);
1404         if (ret)
1405                 goto err_free_netdev;
1406 
1407 
1408         /* xSGDMA Tx Dispatcher address space */
1409         ret = request_and_map(pdev, "tx_csr", &dma_res,
1410                               &priv->tx_dma_csr);
1411         if (ret)
1412                 goto err_free_netdev;
1413 
1414 
1415         /* Rx IRQ */
1416         priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
1417         if (priv->rx_irq == -ENXIO) {
1418                 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1419                 ret = -ENXIO;
1420                 goto err_free_netdev;
1421         }
1422 
1423         /* Tx IRQ */
1424         priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
1425         if (priv->tx_irq == -ENXIO) {
1426                 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1427                 ret = -ENXIO;
1428                 goto err_free_netdev;
1429         }
1430 
1431         /* get FIFO depths from device tree */
1432         if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1433                                  &priv->rx_fifo_depth)) {
1434                 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1435                 ret = -ENXIO;
1436                 goto err_free_netdev;
1437         }
1438 
1439         if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1440                                  &priv->tx_fifo_depth)) {
1441                 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1442                 ret = -ENXIO;
1443                 goto err_free_netdev;
1444         }
1445 
1446         /* get hash filter settings for this instance */
1447         priv->hash_filter =
1448                 of_property_read_bool(pdev->dev.of_node,
1449                                       "altr,has-hash-multicast-filter");
1450 
1451         /* Leave the hash filter disabled for now, until the
1452          * multicast filter receive issue is debugged
1453          */
1454         priv->hash_filter = 0;
1455 
1456         /* get supplemental address settings for this instance */
1457         priv->added_unicast =
1458                 of_property_read_bool(pdev->dev.of_node,
1459                                       "altr,has-supplementary-unicast");
1460 
1461         /* Max MTU is 1500, ETH_DATA_LEN */
1462         priv->max_mtu = ETH_DATA_LEN;
1463 
1464         /* Get the max mtu from the device tree. Note that the
1465          * "max-frame-size" parameter is actually max mtu. Definition
1466          * in the ePAPR v1.1 spec and usage differ, so go with usage.
1467          */
1468         of_property_read_u32(pdev->dev.of_node, "max-frame-size",
1469                              &priv->max_mtu);
1470 
1471         /* The DMA buffer size already accounts for an alignment bias
1472          * to avoid unaligned access exceptions for the NIOS processor.
1473          */
1474         priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
1475 
1476         /* get default MAC address from device tree */
1477         macaddr = of_get_mac_address(pdev->dev.of_node);
1478         if (macaddr)
1479                 ether_addr_copy(ndev->dev_addr, macaddr);
1480         else
1481                 eth_hw_addr_random(ndev);
1482 
1483         /* get phy addr and create mdio */
1484         ret = altera_tse_phy_get_addr_mdio_create(ndev);
1485 
1486         if (ret)
1487                 goto err_free_netdev;
1488 
1489         /* initialize netdev */
1490         ndev->mem_start = control_port->start;
1491         ndev->mem_end = control_port->end;
1492         ndev->netdev_ops = &altera_tse_netdev_ops;
1493         altera_tse_set_ethtool_ops(ndev);
1494 
1495         altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
1496 
1497         if (priv->hash_filter)
1498                 altera_tse_netdev_ops.ndo_set_rx_mode =
1499                         tse_set_rx_mode_hashfilter;
1500 
1501         /* Scatter/gather IO is not supported,
1502          * so it is turned off
1503          */
1504         ndev->hw_features &= ~NETIF_F_SG;
1505         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1506 
1507         /* VLAN offloading of tagging, stripping and filtering is not
1508          * supported by hardware, but driver will accommodate the
1509          * extra 4-byte VLAN tag for processing by upper layers
1510          */
1511         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1512 
1513         /* setup NAPI interface */
1514         netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
1515 
1516         spin_lock_init(&priv->mac_cfg_lock);
1517         spin_lock_init(&priv->tx_lock);
1518         spin_lock_init(&priv->rxdma_irq_lock);
1519 
1520         netif_carrier_off(ndev);
1521         ret = register_netdev(ndev);
1522         if (ret) {
1523                 dev_err(&pdev->dev, "failed to register TSE net device\n");
1524                 goto err_register_netdev;
1525         }
1526 
1527         platform_set_drvdata(pdev, ndev);
1528 
1529         priv->revision = ioread32(&priv->mac_dev->megacore_revision);
1530 
1531         if (netif_msg_probe(priv))
1532                 dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
1533                          (priv->revision >> 8) & 0xff,
1534                          priv->revision & 0xff,
1535                          (unsigned long) control_port->start, priv->rx_irq,
1536                          priv->tx_irq);
1537 
1538         ret = init_phy(ndev);
1539         if (ret != 0) {
1540                 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1541                 goto err_init_phy;
1542         }
1543         return 0;
1544 
1545 err_init_phy:
1546         unregister_netdev(ndev);
1547 err_register_netdev:
1548         netif_napi_del(&priv->napi);
1549         altera_tse_mdio_destroy(ndev);
1550 err_free_netdev:
1551         free_netdev(ndev);
1552         return ret;
1553 }
1554 
1555 /* Remove Altera TSE MAC device
1556  */
1557 static int altera_tse_remove(struct platform_device *pdev)
1558 {
1559         struct net_device *ndev = platform_get_drvdata(pdev);
1560         struct altera_tse_private *priv = netdev_priv(ndev);
1561 
1562         if (priv->phydev)
1563                 phy_disconnect(priv->phydev);
1564 
1565         platform_set_drvdata(pdev, NULL);
1566         altera_tse_mdio_destroy(ndev);
1567         unregister_netdev(ndev);
1568         free_netdev(ndev);
1569 
1570         return 0;
1571 }
1572 
1573 static const struct altera_dmaops altera_dtype_sgdma = {
1574         .altera_dtype = ALTERA_DTYPE_SGDMA,
1575         .dmamask = 32,
1576         .reset_dma = sgdma_reset,
1577         .enable_txirq = sgdma_enable_txirq,
1578         .enable_rxirq = sgdma_enable_rxirq,
1579         .disable_txirq = sgdma_disable_txirq,
1580         .disable_rxirq = sgdma_disable_rxirq,
1581         .clear_txirq = sgdma_clear_txirq,
1582         .clear_rxirq = sgdma_clear_rxirq,
1583         .tx_buffer = sgdma_tx_buffer,
1584         .tx_completions = sgdma_tx_completions,
1585         .add_rx_desc = sgdma_add_rx_desc,
1586         .get_rx_status = sgdma_rx_status,
1587         .init_dma = sgdma_initialize,
1588         .uninit_dma = sgdma_uninitialize,
1589         .start_rxdma = sgdma_start_rxdma,
1590 };
1591 
1592 static const struct altera_dmaops altera_dtype_msgdma = {
1593         .altera_dtype = ALTERA_DTYPE_MSGDMA,
1594         .dmamask = 64,
1595         .reset_dma = msgdma_reset,
1596         .enable_txirq = msgdma_enable_txirq,
1597         .enable_rxirq = msgdma_enable_rxirq,
1598         .disable_txirq = msgdma_disable_txirq,
1599         .disable_rxirq = msgdma_disable_rxirq,
1600         .clear_txirq = msgdma_clear_txirq,
1601         .clear_rxirq = msgdma_clear_rxirq,
1602         .tx_buffer = msgdma_tx_buffer,
1603         .tx_completions = msgdma_tx_completions,
1604         .add_rx_desc = msgdma_add_rx_desc,
1605         .get_rx_status = msgdma_rx_status,
1606         .init_dma = msgdma_initialize,
1607         .uninit_dma = msgdma_uninitialize,
1608         .start_rxdma = msgdma_start_rxdma,
1609 };
1610 
1611 static const struct of_device_id altera_tse_ids[] = {
1612         { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
1613         { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
1614         { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
1615         {},
1616 };
1617 MODULE_DEVICE_TABLE(of, altera_tse_ids);
1618 
1619 static struct platform_driver altera_tse_driver = {
1620         .probe          = altera_tse_probe,
1621         .remove         = altera_tse_remove,
1622         .suspend        = NULL,
1623         .resume         = NULL,
1624         .driver         = {
1625                 .name   = ALTERA_TSE_RESOURCE_NAME,
1626                 .of_match_table = altera_tse_ids,
1627         },
1628 };
1629 
1630 module_platform_driver(altera_tse_driver);
1631 
1632 MODULE_AUTHOR("Altera Corporation");
1633 MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
1634 MODULE_LICENSE("GPL v2");
1635 
