
drivers/net/ethernet/octeon/octeon_mgmt.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
        "Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow space for the Ethernet header plus 8 bytes for the VLAN tag and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
        u64 d64;
        struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
                u64 reserved_62_63:2;
                /* Length of the buffer/packet in bytes */
                u64 len:14;
                /* For TX, signals that the packet should be timestamped */
                u64 tstamp:1;
                /* The RX error code */
                u64 code:7;
                /* Physical address of the buffer */
                u64 addr:40;
#else
                u64 addr:40;
                u64 code:7;
                u64 tstamp:1;
                u64 len:14;
                u64 reserved_62_63:2;
#endif
        } s;
};
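
/* A descriptor packs into one 64-bit word; reading the fields off the
 * big-endian declaration above, the layout is:
 *
 *   63:62  reserved
 *   61:48  len     (buffer/packet length in bytes)
 *   47     tstamp
 *   46:40  code    (RING_ENTRY_CODE_DONE / RING_ENTRY_CODE_MORE / errors)
 *   39:0   addr    (40-bit physical buffer address)
 *
 * As an illustrative example, a completed 64-byte RX packet at physical
 * address 0x1000 would read back as
 * (0x40ULL << 48) | (0xfULL << 40) | 0x1000.
 */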

#define MIX_ORING1      0x0
#define MIX_ORING2      0x8
#define MIX_IRING1      0x10
#define MIX_IRING2      0x18
#define MIX_CTL         0x20
#define MIX_IRHWM       0x28
#define MIX_IRCNT       0x30
#define MIX_ORHWM       0x38
#define MIX_ORCNT       0x40
#define MIX_ISR         0x48
#define MIX_INTENA      0x50
#define MIX_REMCNT      0x58
#define MIX_BIST        0x78

#define AGL_GMX_PRT_CFG                 0x10
#define AGL_GMX_RX_FRM_CTL              0x18
#define AGL_GMX_RX_FRM_MAX              0x30
#define AGL_GMX_RX_JABBER               0x38
#define AGL_GMX_RX_STATS_CTL            0x50

#define AGL_GMX_RX_STATS_PKTS_DRP       0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP       0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD       0xc0

#define AGL_GMX_RX_ADR_CTL              0x100
#define AGL_GMX_RX_ADR_CAM_EN           0x108
#define AGL_GMX_RX_ADR_CAM0             0x180
#define AGL_GMX_RX_ADR_CAM1             0x188
#define AGL_GMX_RX_ADR_CAM2             0x190
#define AGL_GMX_RX_ADR_CAM3             0x198
#define AGL_GMX_RX_ADR_CAM4             0x1a0
#define AGL_GMX_RX_ADR_CAM5             0x1a8

#define AGL_GMX_TX_CLK                  0x208
#define AGL_GMX_TX_STATS_CTL            0x268
#define AGL_GMX_TX_CTL                  0x270
#define AGL_GMX_TX_STAT0                0x280
#define AGL_GMX_TX_STAT1                0x288
#define AGL_GMX_TX_STAT2                0x290
#define AGL_GMX_TX_STAT3                0x298
#define AGL_GMX_TX_STAT4                0x2a0
#define AGL_GMX_TX_STAT5                0x2a8
#define AGL_GMX_TX_STAT6                0x2b0
#define AGL_GMX_TX_STAT7                0x2b8
#define AGL_GMX_TX_STAT8                0x2c0
#define AGL_GMX_TX_STAT9                0x2c8
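
/* These are byte offsets from the per-port MIX and AGL register windows
 * mapped in octeon_mgmt_probe(); accesses throughout the driver take the
 * form cvmx_read_csr(p->mix + MIX_CTL) or
 * cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, val).
 */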

struct octeon_mgmt {
        struct net_device *netdev;
        u64 mix;
        u64 agl;
        u64 agl_prt_ctl;
        int port;
        int irq;
        bool has_rx_tstamp;
        u64 *tx_ring;
        dma_addr_t tx_ring_handle;
        unsigned int tx_next;
        unsigned int tx_next_clean;
        unsigned int tx_current_fill;
        /* The tx_list lock also protects the ring related variables */
        struct sk_buff_head tx_list;

        /* RX variables only touched in napi_poll.  No locking necessary. */
        u64 *rx_ring;
        dma_addr_t rx_ring_handle;
        unsigned int rx_next;
        unsigned int rx_next_fill;
        unsigned int rx_current_fill;
        struct sk_buff_head rx_list;

        spinlock_t lock;
        unsigned int last_duplex;
        unsigned int last_link;
        unsigned int last_speed;
        struct device *dev;
        struct napi_struct napi;
        struct tasklet_struct tx_clean_tasklet;
        struct phy_device *phydev;
        struct device_node *phy_np;
        resource_size_t mix_phys;
        resource_size_t mix_size;
        resource_size_t agl_phys;
        resource_size_t agl_size;
        resource_size_t agl_prt_ctl_phys;
        resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
        mix_intena.s.ithena = enable ? 1 : 0;
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
        mix_intena.s.othena = enable ? 1 : 0;
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 0);
}

static unsigned int ring_max_fill(unsigned int ring_size)
{
        return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
        return ring_size * sizeof(union mgmt_port_ring_entry);
}
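
/* Worked numbers for the sizes defined above: each entry is one 8-byte
 * union mgmt_port_ring_entry, so the RX ring occupies 512 * 8 = 4096
 * bytes and fills to at most 512 - 8 = 504 entries, while the TX ring
 * occupies 128 * 8 = 1024 bytes and fills to at most 120 entries.
 */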

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
                unsigned int size;
                union mgmt_port_ring_entry re;
                struct sk_buff *skb;

                /* CN56XX pass 1 needs 8 bytes of padding.  */
                size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

                skb = netdev_alloc_skb(netdev, size);
                if (!skb)
                        break;
                skb_reserve(skb, NET_IP_ALIGN);
                __skb_queue_tail(&p->rx_list, skb);

                re.d64 = 0;
                re.s.len = size;
                re.s.addr = dma_map_single(p->dev, skb->data,
                                           size,
                                           DMA_FROM_DEVICE);

                /* Put it in the ring.  */
                p->rx_ring[p->rx_next_fill] = re.d64;
                dma_sync_single_for_device(p->dev, p->rx_ring_handle,
                                           ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                           DMA_BIDIRECTIONAL);
                p->rx_next_fill =
                        (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
                p->rx_current_fill++;
                /* Ring the bell.  */
                cvmx_write_csr(p->mix + MIX_IRING2, 1);
        }
}
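
/* For the default MTU of 1500 the buffer size above works out to
 * 1500 + 22 (OCTEON_MGMT_RX_HEADROOM: 14-byte Ethernet header + 4-byte
 * FCS + 4-byte VLAN tag) + 8 (CN56XX pass 1 padding) + NET_IP_ALIGN,
 * i.e. 1532 bytes on platforms where NET_IP_ALIGN is 2.
 */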

static ktime_t ptp_to_ktime(u64 ptptime)
{
        ktime_t ktimebase;
        u64 ptpbase;
        unsigned long flags;

        local_irq_save(flags);
        /* Fill the icache with the code */
        ktime_get_real();
        /* Flush all pending operations */
        mb();
        /* Read the time and PTP clock as close together as
         * possible. It is important that this sequence take the same
         * amount of time to reduce jitter
         */
        ktimebase = ktime_get_real();
        ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
        local_irq_restore(flags);

        return ktime_sub_ns(ktimebase, ptpbase - ptptime);
}
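
/* The conversion takes a paired sample (ktimebase, ptpbase) of system
 * time and the free-running PTP counter, then shifts the stamp by the
 * counter delta: if a packet was stamped 1000 ns before the sample
 * (ptpbase - ptptime == 1000), the result is ktimebase minus 1000 ns.
 */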

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
        union cvmx_mixx_orcnt mix_orcnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        int cleaned = 0;
        unsigned long flags;

        mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        while (mix_orcnt.s.orcnt) {
                spin_lock_irqsave(&p->tx_list.lock, flags);

                mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

                if (mix_orcnt.s.orcnt == 0) {
                        spin_unlock_irqrestore(&p->tx_list.lock, flags);
                        break;
                }

                dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                        ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                        DMA_BIDIRECTIONAL);

                re.d64 = p->tx_ring[p->tx_next_clean];
                p->tx_next_clean =
                        (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
                skb = __skb_dequeue(&p->tx_list);

                mix_orcnt.u64 = 0;
                mix_orcnt.s.orcnt = 1;

                /* Acknowledge to hardware that we have the buffer.  */
                cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
                p->tx_current_fill--;

                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);

                /* Read the hardware TX timestamp if one was recorded */
                if (unlikely(re.s.tstamp)) {
                        struct skb_shared_hwtstamps ts;
                        /* Read the timestamp */
                        u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
                        /* Remove the timestamp from the FIFO */
                        cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
                        /* Tell the kernel about the timestamp */
                        ts.syststamp = ptp_to_ktime(ns);
                        ts.hwtstamp = ns_to_ktime(ns);
                        skb_tstamp_tx(skb, &ts);
                }

                dev_kfree_skb_any(skb);
                cleaned++;

                mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        }

        if (cleaned && netif_queue_stopped(p->netdev))
                netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
        struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        unsigned long flags;
        u64 drop, bad;

        /* These reads also clear the count registers.  */
        drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
        bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

        if (drop || bad) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.rx_errors += bad;
                netdev->stats.rx_dropped += drop;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        unsigned long flags;

        union cvmx_agl_gmx_txx_stat0 s0;
        union cvmx_agl_gmx_txx_stat1 s1;

        /* These reads also clear the count registers.  */
        s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
        s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

        if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
                netdev->stats.collisions += s1.s.scol + s1.s.mcol;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
                                         struct sk_buff **pskb)
{
        union mgmt_port_ring_entry re;

        dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                DMA_BIDIRECTIONAL);

        re.d64 = p->rx_ring[p->rx_next];
        p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill--;
        *pskb = __skb_dequeue(&p->rx_list);

        dma_unmap_single(p->dev, re.s.addr,
                         ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
                         DMA_FROM_DEVICE);

        return re.d64;
}

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
        struct net_device *netdev = p->netdev;
        union cvmx_mixx_ircnt mix_ircnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        struct sk_buff *skb2;
        struct sk_buff *skb_new;
        union mgmt_port_ring_entry re2;
        int rc = 1;

        re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
        if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
                /* A good packet, send it up. */
                skb_put(skb, re.s.len);
good:
                /* Process the RX timestamp if it was recorded */
                if (p->has_rx_tstamp) {
                        /* The first 8 bytes are the timestamp */
                        u64 ns = *(u64 *)skb->data;
                        struct skb_shared_hwtstamps *ts;
                        ts = skb_hwtstamps(skb);
                        ts->hwtstamp = ns_to_ktime(ns);
                        ts->syststamp = ptp_to_ktime(ns);
                        __skb_pull(skb, 8);
                }
                skb->protocol = eth_type_trans(skb, netdev);
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += skb->len;
                netif_receive_skb(skb);
                rc = 0;
        } else if (re.s.code == RING_ENTRY_CODE_MORE) {
                /* Packet split across skbs.  This can happen if we
                 * increase the MTU.  Buffers that are already in the
                 * rx ring can then end up being too small.  As the rx
                 * ring is refilled, buffers sized for the new MTU
                 * will be used and we should go back to the normal
                 * non-split case.
                 */
                skb_put(skb, re.s.len);
                do {
                        re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                        if (re2.s.code != RING_ENTRY_CODE_MORE
                                && re2.s.code != RING_ENTRY_CODE_DONE)
                                goto split_error;
                        skb_put(skb2, re2.s.len);
                        skb_new = skb_copy_expand(skb, 0, skb2->len,
                                                  GFP_ATOMIC);
                        if (!skb_new)
                                goto split_error;
                        if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
                                          skb2->len))
                                goto split_error;
                        skb_put(skb_new, skb2->len);
                        dev_kfree_skb_any(skb);
                        dev_kfree_skb_any(skb2);
                        skb = skb_new;
                } while (re2.s.code == RING_ENTRY_CODE_MORE);
                goto good;
        } else {
                /* Some other error, discard it. */
                dev_kfree_skb_any(skb);
                /* Error statistics are accumulated in
                 * octeon_mgmt_update_rx_stats.
                 */
        }
        goto done;
split_error:
        /* Discard the whole mess. */
        dev_kfree_skb_any(skb);
        dev_kfree_skb_any(skb2);
        while (re2.s.code == RING_ENTRY_CODE_MORE) {
                re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                dev_kfree_skb_any(skb2);
        }
        netdev->stats.rx_errors++;

done:
        /* Tell the hardware we processed a packet.  */
        mix_ircnt.u64 = 0;
        mix_ircnt.s.ircnt = 1;
        cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
        return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
        unsigned int work_done = 0;
        union cvmx_mixx_ircnt mix_ircnt;
        int rc;

        mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
        while (work_done < budget && mix_ircnt.s.ircnt) {

                rc = octeon_mgmt_receive_one(p);
                if (!rc)
                        work_done++;

                /* Check for more packets. */
                mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
        }

        octeon_mgmt_rx_fill_ring(p->netdev);

        return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
        struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
        struct net_device *netdev = p->netdev;
        unsigned int work_done = 0;

        work_done = octeon_mgmt_receive_packets(p, budget);

        if (work_done < budget) {
                /* We stopped because no more packets were available. */
                napi_complete(napi);
                octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);

        return work_done;
}

/* Reset the hardware to clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_mixx_bist mix_bist;
        union cvmx_agl_gmx_bist agl_gmx_bist;

        mix_ctl.u64 = 0;
        cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
        do {
                mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
        } while (mix_ctl.s.busy);
        mix_ctl.s.reset = 1;
        cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
        cvmx_read_csr(p->mix + MIX_CTL);
        octeon_io_clk_delay(64);

        mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
        if (mix_bist.u64)
                dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
                         (unsigned long long)mix_bist.u64);

        agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
        if (agl_gmx_bist.u64)
                dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
                         (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
        u64 cam[6];
        u64 cam_mask;
        int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
                                      unsigned char *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
        cs->cam_mask |= (1ULL << cs->cam_index);
        cs->cam_index++;
}
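
/* The CAM is stored transposed: cam[i] collects byte i of up to eight
 * addresses, one per 8-bit lane selected by cam_index.  For example,
 * adding 02:00:00:12:34:56 as the first entry (cam_index 0) leaves
 * cam[0] = 0x02, cam[3] = 0x12, cam[4] = 0x34 and cam[5] = 0x56 in the
 * lowest lane, and sets bit 0 of cam_mask.
 */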

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
        union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
        unsigned long flags;
        unsigned int prev_packet_enable;
        unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
        unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
        struct octeon_mgmt_cam_state cam_state;
        struct netdev_hw_addr *ha;
        int available_cam_entries;

        memset(&cam_state, 0, sizeof(cam_state));

        if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
                cam_mode = 0;
                available_cam_entries = 8;
        } else {
                /* One CAM entry for the primary address, leaves seven
                 * for the secondary addresses.
                 */
                available_cam_entries = 7 - netdev->uc.count;
        }

        if (netdev->flags & IFF_MULTICAST) {
                if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
                    netdev_mc_count(netdev) > available_cam_entries)
                        multicast_mode = 2; /* 2 - Accept all multicast.  */
                else
                        multicast_mode = 0; /* 0 - Use CAM.  */
        }

        if (cam_mode == 1) {
                /* Add primary address. */
                octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
                netdev_for_each_uc_addr(ha, netdev)
                        octeon_mgmt_cam_state_add(&cam_state, ha->addr);
        }
        if (multicast_mode == 0) {
                netdev_for_each_mc_addr(ha, netdev)
                        octeon_mgmt_cam_state_add(&cam_state, ha->addr);
        }

        spin_lock_irqsave(&p->lock, flags);

        /* Disable packet I/O. */
        agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
        prev_packet_enable = agl_gmx_prtx.s.en;
        agl_gmx_prtx.s.en = 0;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

        adr_ctl.u64 = 0;
        adr_ctl.s.cam_mode = cam_mode;
        adr_ctl.s.mcst = multicast_mode;
        adr_ctl.s.bcst = 1;     /* Allow broadcast */

        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

        /* Restore packet I/O. */
        agl_gmx_prtx.s.en = prev_packet_enable;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

        spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
        int r = eth_mac_addr(netdev, addr);

        if (r)
                return r;

        octeon_mgmt_set_rx_filtering(netdev);

        return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

        /* Limit the MTU to make sure the ethernet packets are between
         * 64 bytes and 16383 bytes.
         */
        if (size_without_fcs < 64 || size_without_fcs > 16383) {
                dev_warn(p->dev, "MTU must be between %d and %d.\n",
                         64 - OCTEON_MGMT_RX_HEADROOM,
                         16383 - OCTEON_MGMT_RX_HEADROOM);
                return -EINVAL;
        }

        netdev->mtu = new_mtu;

        cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
        cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
                       (size_without_fcs + 7) & 0xfff8);

        return 0;
}
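
/* With OCTEON_MGMT_RX_HEADROOM equal to 22, the check above allows MTUs
 * from 42 to 16361.  The jabber threshold is the frame max rounded up to
 * a multiple of 8: e.g. an MTU of 1500 gives a frame max of 1522 and a
 * jabber limit of (1522 + 7) & 0xfff8 = 1528.
 */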

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct octeon_mgmt *p = netdev_priv(netdev);
        union cvmx_mixx_isr mixx_isr;

        mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

        /* Clear any pending interrupts */
        cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
        cvmx_read_csr(p->mix + MIX_ISR);

        if (mixx_isr.s.irthresh) {
                octeon_mgmt_disable_rx_irq(p);
                napi_schedule(&p->napi);
        }
        if (mixx_isr.s.orthresh) {
                octeon_mgmt_disable_tx_irq(p);
                tasklet_schedule(&p->tx_clean_tasklet);
        }

        return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
                                      struct ifreq *rq, int cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        struct hwtstamp_config config;
        union cvmx_mio_ptp_clock_cfg ptp;
        union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
        bool have_hw_timestamps = false;

        if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
                return -EFAULT;

        if (config.flags) /* reserved for future extensions */
                return -EINVAL;

        /* Check whether the hardware supports timestamps */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                /* Get the current state of the PTP clock */
                ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
                if (!ptp.s.ext_clk_en) {
                        /* The clock has not been configured to use an
                         * external source.  Program it to use the main clock
                         * reference.
                         */
                        u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
                        if (!ptp.s.ptp_en)
                                cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
                        pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
                                (NSEC_PER_SEC << 32) / clock_comp);
                } else {
                        /* The clock is already programmed to use a GPIO */
                        u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
                        pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
                                ptp.s.ext_clk_in,
                                (NSEC_PER_SEC << 32) / clock_comp);
                }

                /* Enable the clock if it wasn't done already */
                if (!ptp.s.ptp_en) {
                        ptp.s.ptp_en = 1;
                        cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
                }
                have_hw_timestamps = true;
        }

        if (!have_hw_timestamps)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
        case HWTSTAMP_TX_ON:
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                p->has_rx_tstamp = false;
                rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
                rxx_frm_ctl.s.ptp_mode = 0;
                cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                p->has_rx_tstamp = have_hw_timestamps;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                if (p->has_rx_tstamp) {
                        rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
                        rxx_frm_ctl.s.ptp_mode = 1;
                        cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
                }
                break;
        default:
                return -ERANGE;
        }

        if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
                return -EFAULT;

        return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
                             struct ifreq *rq, int cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
        default:
                if (p->phydev)
                        return phy_mii_ioctl(p->phydev, rq, cmd);
                return -EINVAL;
        }
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;

        /* Disable GMX before we make any changes. */
        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
        prtx_cfg.s.en = 0;
        prtx_cfg.s.tx_en = 0;
        prtx_cfg.s.rx_en = 0;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

        if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                int i;
                /* Poll up to 10 ms for the port to go idle.  The
                 * original loop also incremented i in its body, which
                 * halved the number of polls; the for statement alone
                 * does the counting.
                 */
                for (i = 0; i < 10; i++) {
                        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
                        if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
                                break;
                        mdelay(1);
                }
        }
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;

        /* Restore the GMX enable state only if link is set */
        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
        prtx_cfg.s.tx_en = 1;
        prtx_cfg.s.rx_en = 1;
        prtx_cfg.s.en = 1;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;

        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

        if (!p->phydev->link)
                prtx_cfg.s.duplex = 1;
        else
                prtx_cfg.s.duplex = p->phydev->duplex;

        switch (p->phydev->speed) {
        case 10:
                prtx_cfg.s.speed = 0;
                prtx_cfg.s.slottime = 0;

                if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                        prtx_cfg.s.burst = 1;
                        prtx_cfg.s.speed_msb = 1;
                }
                break;
        case 100:
                prtx_cfg.s.speed = 0;
                prtx_cfg.s.slottime = 0;

                if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                        prtx_cfg.s.burst = 1;
                        prtx_cfg.s.speed_msb = 0;
                }
                break;
        case 1000:
                /* 1000 MBits is only supported on 6XXX chips */
                if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                        prtx_cfg.s.speed = 1;
                        prtx_cfg.s.speed_msb = 0;
                        /* Only matters for half-duplex */
                        prtx_cfg.s.slottime = 1;
                        prtx_cfg.s.burst = p->phydev->duplex;
                }
                break;
        case 0:  /* No link */
        default:
                break;
        }

        /* Write the new GMX setting with the port still disabled. */
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

        /* Read GMX CFG again to make sure the config is completed. */
        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

        if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                union cvmx_agl_gmx_txx_clk agl_clk;
                union cvmx_agl_prtx_ctl prtx_ctl;

                prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
                agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
                /* MII (both speeds) and RGMII 1000 speed. */
                agl_clk.s.clk_cnt = 1;
                if (prtx_ctl.s.mode == 0) { /* RGMII mode */
                        if (p->phydev->speed == 10)
                                agl_clk.s.clk_cnt = 50;
                        else if (p->phydev->speed == 100)
                                agl_clk.s.clk_cnt = 5;
                }
                cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
        }
}
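
/* A sanity check on the clk_cnt divisors above, assuming the 125 MHz
 * (8 ns) reference described near NS_PER_PHY_CLK below: dividing by 50
 * yields the 2.5 MHz TX clock RGMII uses at 10 Mbps, dividing by 5
 * yields 25 MHz for 100 Mbps, and a divisor of 1 keeps the full
 * 125 MHz for 1000 Mbps.
 */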

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        unsigned long flags;
        int link_changed = 0;

        if (!p->phydev)
                return;

        spin_lock_irqsave(&p->lock, flags);

        if (!p->phydev->link && p->last_link)
                link_changed = -1;

        if (p->phydev->link
            && (p->last_duplex != p->phydev->duplex
                || p->last_link != p->phydev->link
                || p->last_speed != p->phydev->speed)) {
                octeon_mgmt_disable_link(p);
                link_changed = 1;
                octeon_mgmt_update_link(p);
                octeon_mgmt_enable_link(p);
        }

        p->last_link = p->phydev->link;
        p->last_speed = p->phydev->speed;
        p->last_duplex = p->phydev->duplex;

        spin_unlock_irqrestore(&p->lock, flags);

        if (link_changed != 0) {
                if (link_changed > 0) {
                        pr_info("%s: Link is up - %d/%s\n", netdev->name,
                                p->phydev->speed,
                                DUPLEX_FULL == p->phydev->duplex ?
                                "Full" : "Half");
                } else {
                        pr_info("%s: Link is down\n", netdev->name);
                }
        }
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (octeon_is_simulation() || p->phy_np == NULL) {
                /* No PHYs in the simulator. */
                netif_carrier_on(netdev);
                return 0;
        }

        p->phydev = of_phy_connect(netdev, p->phy_np,
                                   octeon_mgmt_adjust_link, 0,
                                   PHY_INTERFACE_MODE_MII);

        if (!p->phydev)
                return -ENODEV;

        return 0;
}

static int octeon_mgmt_open(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
        union cvmx_mixx_oring1 oring1;
        union cvmx_mixx_iring1 iring1;
        union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
        union cvmx_mixx_irhwm mix_irhwm;
        union cvmx_mixx_orhwm mix_orhwm;
        union cvmx_mixx_intena mix_intena;
        struct sockaddr sa;

        /* Allocate ring buffers.  */
        p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                             GFP_KERNEL);
        if (!p->tx_ring)
                return -ENOMEM;
        p->tx_ring_handle =
                dma_map_single(p->dev, p->tx_ring,
                               ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                               DMA_BIDIRECTIONAL);
        p->tx_next = 0;
        p->tx_next_clean = 0;
        p->tx_current_fill = 0;

        p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                             GFP_KERNEL);
        if (!p->rx_ring)
                goto err_nomem;
        p->rx_ring_handle =
                dma_map_single(p->dev, p->rx_ring,
                               ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                               DMA_BIDIRECTIONAL);

        p->rx_next = 0;
        p->rx_next_fill = 0;
        p->rx_current_fill = 0;

        octeon_mgmt_reset_hw(p);

        mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

        /* Bring it out of reset if needed. */
        if (mix_ctl.s.reset) {
                mix_ctl.s.reset = 0;
                cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
                do {
                        mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
                } while (mix_ctl.s.reset);
        }

        if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
                agl_gmx_inf_mode.u64 = 0;
                agl_gmx_inf_mode.s.en = 1;
                cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
        }
        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
                || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
                /* Force compensation values, as they are not
                 * determined properly by HW
                 */
                union cvmx_agl_gmx_drv_ctl drv_ctl;

                drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
                if (p->port) {
                        drv_ctl.s.byp_en1 = 1;
                        drv_ctl.s.nctl1 = 6;
                        drv_ctl.s.pctl1 = 6;
                } else {
                        drv_ctl.s.byp_en = 1;
                        drv_ctl.s.nctl = 6;
                        drv_ctl.s.pctl = 6;
                }
                cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
        }

        oring1.u64 = 0;
        oring1.s.obase = p->tx_ring_handle >> 3;
        oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
        cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

        iring1.u64 = 0;
        iring1.s.ibase = p->rx_ring_handle >> 3;
        iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
        cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

        memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
        octeon_mgmt_set_mac_address(netdev, &sa);

        octeon_mgmt_change_mtu(netdev, netdev->mtu);

        /* Enable the port HW. Packets are not allowed until
         * cvmx_mgmt_port_enable() is called.
         */
        mix_ctl.u64 = 0;
        mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
        mix_ctl.s.en = 1;           /* Enable the port */
        mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
        /* MII CB-request FIFO programmable high watermark */
        mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
        mix_ctl.s.lendian = 1;
#endif
        cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

        /* Read the PHY to find the mode of the interface. */
        if (octeon_mgmt_init_phy(netdev)) {
                dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
                goto err_noirq;
        }

        /* Set the mode of the interface, RGMII/MII. */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
                union cvmx_agl_prtx_ctl agl_prtx_ctl;
                int rgmii_mode = (p->phydev->supported &
                                  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

                agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
                agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
                cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

                /* MII clock counts are based on the 125 MHz reference,
                 * which has an 8 ns period, so our delays need to be
                 * multiplied by this factor.
                 */
#define NS_PER_PHY_CLK 8
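                /* For scale, the two waits below come out to
                 * 256 * 8 ns = 2.048 us for the DLL lock and
                 * 1040 * 8 ns = 8.32 us for the compensation lock.
                 */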

                /* Take the DLL and clock tree out of reset */
                agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
                agl_prtx_ctl.s.clkrst = 0;
                if (rgmii_mode) {
                        agl_prtx_ctl.s.dllrst = 0;
                        agl_prtx_ctl.s.clktx_byp = 0;
                }
                cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
                cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

                /* Wait for the DLL to lock. External 125 MHz
                 * reference clock must be stable at this point.
                 */
                ndelay(256 * NS_PER_PHY_CLK);

                /* Enable the interface */
                agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
                agl_prtx_ctl.s.enable = 1;
                cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

                /* Read the value back to force the previous write */
                agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

                /* Enable the compensation controller */
                agl_prtx_ctl.s.comp = 1;
                agl_prtx_ctl.s.drv_byp = 0;
                cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
                /* Force write out before wait. */
                cvmx_read_csr(p->agl_prt_ctl);

                /* Wait for the compensation state to lock. */
                ndelay(1040 * NS_PER_PHY_CLK);

                /* The default interframe gaps are too small.  The
                 * recommended workaround is:
                 *
                 * AGL_GMX_TX_IFG[IFG1]=14
                 * AGL_GMX_TX_IFG[IFG2]=10
                 */
                cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
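                /* The 0xae literal is those two values packed as
                 * nibbles: (IFG2 << 4) | IFG1 = (10 << 4) | 14 = 0xae
                 * (assuming IFG1 occupies the low nibble of the
                 * register).
                 */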

        }

        octeon_mgmt_rx_fill_ring(netdev);

        /* Clear statistics. */
        /* Clear on read. */
        cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
        cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
        cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

        cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
        cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
        cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

        /* Clear any pending interrupts */
        cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

        if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
                        netdev)) {
                dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
                goto err_noirq;
        }

        /* Interrupt every single RX packet */
        mix_irhwm.u64 = 0;
        mix_irhwm.s.irhwm = 0;
        cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

        /* Interrupt when we have 1 or more packets to clean.  */
        mix_orhwm.u64 = 0;
        mix_orhwm.s.orhwm = 0;
        cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

        /* Enable receive and transmit interrupts */
        mix_intena.u64 = 0;
        mix_intena.s.ithena = 1;
        mix_intena.s.othena = 1;
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

        /* Enable packet I/O. */

        rxx_frm_ctl.u64 = 0;
        rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
        rxx_frm_ctl.s.pre_align = 1;
        /* When set, disables the length check for non-min sized pkts
         * with padding in the client data.
         */
        rxx_frm_ctl.s.pad_len = 1;
        /* When set, disables the length check for VLAN pkts */
        rxx_frm_ctl.s.vlan_len = 1;
        /* When set, PREAMBLE checking is less strict */
        rxx_frm_ctl.s.pre_free = 1;
        /* Control Pause Frames can match station SMAC */
        rxx_frm_ctl.s.ctl_smac = 0;
        /* Control Pause Frames can match globally assigned multicast address */
        rxx_frm_ctl.s.ctl_mcst = 1;
        /* Forward pause information to TX block */
        rxx_frm_ctl.s.ctl_bck = 1;
        /* Drop Control Pause Frames */
        rxx_frm_ctl.s.ctl_drp = 1;
        /* Strip off the preamble */
        rxx_frm_ctl.s.pre_strp = 1;
        /* This port is configured to send PREAMBLE+SFD to begin every
         * frame.  GMX checks that the PREAMBLE is sent correctly.
         */
        rxx_frm_ctl.s.pre_chk = 1;
        cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

        /* Configure the port duplex, speed and enables */
        octeon_mgmt_disable_link(p);
        if (p->phydev)
                octeon_mgmt_update_link(p);
        octeon_mgmt_enable_link(p);

        p->last_link = 0;
        p->last_speed = 0;
        /* PHY is not present in simulator. The carrier is enabled
         * while initializing the phy for simulator, leave it enabled.
         */
        if (p->phydev) {
                netif_carrier_off(netdev);
                phy_start_aneg(p->phydev);
        }

        netif_wake_queue(netdev);
        napi_enable(&p->napi);

        return 0;
err_noirq:
        octeon_mgmt_reset_hw(p);
        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);
err_nomem:
        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);
        return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        napi_disable(&p->napi);
        netif_stop_queue(netdev);

        if (p->phydev)
                phy_disconnect(p->phydev);
        p->phydev = NULL;

        netif_carrier_off(netdev);

        octeon_mgmt_reset_hw(p);

        free_irq(p->irq, netdev);

        /* dma_unmap is a nop on Octeon, so just free everything.  */
        skb_queue_purge(&p->tx_list);
        skb_queue_purge(&p->rx_list);

        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);

        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);

        return 0;
}

static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        union mgmt_port_ring_entry re;
        unsigned long flags;
        int rv = NETDEV_TX_BUSY;

        re.d64 = 0;
        re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
        re.s.len = skb->len;
        re.s.addr = dma_map_single(p->dev, skb->data,
                                   skb->len,
                                   DMA_TO_DEVICE);

        spin_lock_irqsave(&p->tx_list.lock, flags);

        if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
                spin_unlock_irqrestore(&p->tx_list.lock, flags);
                netif_stop_queue(netdev);
                spin_lock_irqsave(&p->tx_list.lock, flags);
        }
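        /* The lock was dropped while stopping the queue, so the clean
         * tasklet may have freed entries in the meantime; re-check the
         * fill level under the lock and only give up (NETDEV_TX_BUSY)
         * if the ring is still full.
         */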
        if (unlikely(p->tx_current_fill >=
                     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
                spin_unlock_irqrestore(&p->tx_list.lock, flags);
                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);
                goto out;
        }

        __skb_queue_tail(&p->tx_list, skb);

        /* Put it in the ring.  */
        p->tx_ring[p->tx_next] = re.d64;
        p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
        p->tx_current_fill++;

        spin_unlock_irqrestore(&p->tx_list.lock, flags);

        dma_sync_single_for_device(p->dev, p->tx_ring_handle,
                                   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                   DMA_BIDIRECTIONAL);

        netdev->stats.tx_packets++;
        netdev->stats.tx_bytes += skb->len;

        /* Ring the bell.  */
        cvmx_write_csr(p->mix + MIX_ORING2, 1);

        netdev->trans_start = jiffies;
        rv = NETDEV_TX_OK;
out:
        octeon_mgmt_update_tx_stats(netdev);
        return rv;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        octeon_mgmt_receive_packets(p, 16);
        octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
                                    struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
        info->n_stats = 0;
        info->testinfo_len = 0;
        info->regdump_len = 0;
        info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
                                    struct ethtool_cmd *cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (p->phydev)
                return phy_ethtool_gset(p->phydev, cmd);

        return -EOPNOTSUPP;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
                                    struct ethtool_cmd *cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (p->phydev)
                return phy_ethtool_sset(p->phydev, cmd);

        return -EOPNOTSUPP;
}

static int octeon_mgmt_nway_reset(struct net_device *dev)
{
        struct octeon_mgmt *p = netdev_priv(dev);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (p->phydev)
                return phy_start_aneg(p->phydev);

        return -EOPNOTSUPP;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
        .get_drvinfo = octeon_mgmt_get_drvinfo,
        .get_settings = octeon_mgmt_get_settings,
        .set_settings = octeon_mgmt_set_settings,
        .nway_reset = octeon_mgmt_nway_reset,
        .get_link = ethtool_op_get_link,
};

static const struct net_device_ops octeon_mgmt_ops = {
        .ndo_open =                     octeon_mgmt_open,
        .ndo_stop =                     octeon_mgmt_stop,
        .ndo_start_xmit =               octeon_mgmt_xmit,
        .ndo_set_rx_mode =              octeon_mgmt_set_rx_filtering,
        .ndo_set_mac_address =          octeon_mgmt_set_mac_address,
        .ndo_do_ioctl =                 octeon_mgmt_ioctl,
        .ndo_change_mtu =               octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller =          octeon_mgmt_poll_controller,
#endif
};

static int octeon_mgmt_probe(struct platform_device *pdev)
{
        struct net_device *netdev;
        struct octeon_mgmt *p;
        const __be32 *data;
        const u8 *mac;
        struct resource *res_mix;
        struct resource *res_agl;
        struct resource *res_agl_prt_ctl;
        int len;
        int result;

        netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
        if (netdev == NULL)
                return -ENOMEM;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        platform_set_drvdata(pdev, netdev);
        p = netdev_priv(netdev);
        netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
                       OCTEON_MGMT_NAPI_WEIGHT);

        p->netdev = netdev;
        p->dev = &pdev->dev;
        p->has_rx_tstamp = false;

        data = of_get_property(pdev->dev.of_node, "cell-index", &len);
        if (data && len == sizeof(*data)) {
                p->port = be32_to_cpup(data);
        } else {
                dev_err(&pdev->dev, "no 'cell-index' property\n");
                result = -ENXIO;
                goto err;
        }

        snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

        result = platform_get_irq(pdev, 0);
        if (result < 0)
                goto err;

        p->irq = result;

        res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res_mix == NULL) {
                dev_err(&pdev->dev, "no 'reg' resource\n");
                result = -ENXIO;
                goto err;
        }

        res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res_agl == NULL) {
                dev_err(&pdev->dev, "no 'reg' resource\n");
                result = -ENXIO;
                goto err;
        }

        res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
        if (res_agl_prt_ctl == NULL) {
                dev_err(&pdev->dev, "no 'reg' resource\n");
                result = -ENXIO;
                goto err;
        }

        p->mix_phys = res_mix->start;
        p->mix_size = resource_size(res_mix);
        p->agl_phys = res_agl->start;
        p->agl_size = resource_size(res_agl);
        p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
        p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);

        if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
                                     res_mix->name)) {
                dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                        res_mix->name);
                result = -ENXIO;
                goto err;
        }

        if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
                                     res_agl->name)) {
                result = -ENXIO;
                dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                        res_agl->name);
                goto err;
        }

        if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
                                     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
                result = -ENXIO;
                dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                        res_agl_prt_ctl->name);
                goto err;
        }

        p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
        p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
        p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
                                           p->agl_prt_ctl_size);
        spin_lock_init(&p->lock);

        skb_queue_head_init(&p->tx_list);
        skb_queue_head_init(&p->rx_list);
        tasklet_init(&p->tx_clean_tasklet,
                     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

        netdev->priv_flags |= IFF_UNICAST_FLT;

        netdev->netdev_ops = &octeon_mgmt_ops;
        netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

        mac = of_get_mac_address(pdev->dev.of_node);

        if (mac)
                memcpy(netdev->dev_addr, mac, ETH_ALEN);
        else
                eth_hw_addr_random(netdev);

        p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

        result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (result)
                goto err;

        netif_carrier_off(netdev);
        result = register_netdev(netdev);
        if (result)
                goto err;

        dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
        return 0;

err:
        free_netdev(netdev);
        return result;
}

static int octeon_mgmt_remove(struct platform_device *pdev)
{
        struct net_device *netdev = platform_get_drvdata(pdev);

        unregister_netdev(netdev);
        free_netdev(netdev);
        return 0;
}

static struct of_device_id octeon_mgmt_match[] = {
        {
                .compatible = "cavium,octeon-5750-mix",
        },
        {},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
        .driver = {
                .name           = "octeon_mgmt",
                .owner          = THIS_MODULE,
                .of_match_table = octeon_mgmt_match,
        },
        .probe          = octeon_mgmt_probe,
        .remove         = octeon_mgmt_remove,
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
        /* Force our mdiobus driver module to be loaded first. */
        octeon_mdiobus_force_mod_depencency();
        return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
        platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
