
Linux/drivers/net/ethernet/renesas/sh_eth.c

  1 /*  SuperH Ethernet device driver
  2  *
  3  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  4  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
  5  *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
  6  *  Copyright (C) 2014 Codethink Limited
  7  *
  8  *  This program is free software; you can redistribute it and/or modify it
  9  *  under the terms and conditions of the GNU General Public License,
 10  *  version 2, as published by the Free Software Foundation.
 11  *
 12  *  This program is distributed in the hope it will be useful, but WITHOUT
 13  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 14  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 15  *  more details.
 16  *
 17  *  The full GNU General Public License is included in this distribution in
 18  *  the file called "COPYING".
 19  */
 20 
 21 #include <linux/module.h>
 22 #include <linux/kernel.h>
 23 #include <linux/spinlock.h>
 24 #include <linux/interrupt.h>
 25 #include <linux/dma-mapping.h>
 26 #include <linux/etherdevice.h>
 27 #include <linux/delay.h>
 28 #include <linux/platform_device.h>
 29 #include <linux/mdio-bitbang.h>
 30 #include <linux/netdevice.h>
 31 #include <linux/of.h>
 32 #include <linux/of_device.h>
 33 #include <linux/of_irq.h>
 34 #include <linux/of_net.h>
 35 #include <linux/phy.h>
 36 #include <linux/cache.h>
 37 #include <linux/io.h>
 38 #include <linux/pm_runtime.h>
 39 #include <linux/slab.h>
 40 #include <linux/ethtool.h>
 41 #include <linux/if_vlan.h>
 42 #include <linux/clk.h>
 43 #include <linux/sh_eth.h>
 44 #include <linux/of_mdio.h>
 45 
 46 #include "sh_eth.h"
 47 
 48 #define SH_ETH_DEF_MSG_ENABLE \
 49                 (NETIF_MSG_LINK | \
 50                 NETIF_MSG_TIMER | \
 51                 NETIF_MSG_RX_ERR | \
 52                 NETIF_MSG_TX_ERR)
 53 
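     /* Per-SoC register offset tables: each array maps the generic register
      * index (the enum in sh_eth.h) to the MMIO offset used by that
      * controller family.  sh_eth_read()/sh_eth_write() go through the
      * table selected at probe time via mdp->reg_offset.
      */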
 54 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
 55         [EDSR]          = 0x0000,
 56         [EDMR]          = 0x0400,
 57         [EDTRR]         = 0x0408,
 58         [EDRRR]         = 0x0410,
 59         [EESR]          = 0x0428,
 60         [EESIPR]        = 0x0430,
 61         [TDLAR]         = 0x0010,
 62         [TDFAR]         = 0x0014,
 63         [TDFXR]         = 0x0018,
 64         [TDFFR]         = 0x001c,
 65         [RDLAR]         = 0x0030,
 66         [RDFAR]         = 0x0034,
 67         [RDFXR]         = 0x0038,
 68         [RDFFR]         = 0x003c,
 69         [TRSCER]        = 0x0438,
 70         [RMFCR]         = 0x0440,
 71         [TFTR]          = 0x0448,
 72         [FDR]           = 0x0450,
 73         [RMCR]          = 0x0458,
 74         [RPADIR]        = 0x0460,
 75         [FCFTR]         = 0x0468,
 76         [CSMR]          = 0x04E4,
 77 
 78         [ECMR]          = 0x0500,
 79         [ECSR]          = 0x0510,
 80         [ECSIPR]        = 0x0518,
 81         [PIR]           = 0x0520,
 82         [PSR]           = 0x0528,
 83         [PIPR]          = 0x052c,
 84         [RFLR]          = 0x0508,
 85         [APR]           = 0x0554,
 86         [MPR]           = 0x0558,
 87         [PFTCR]         = 0x055c,
 88         [PFRCR]         = 0x0560,
 89         [TPAUSER]       = 0x0564,
 90         [GECMR]         = 0x05b0,
 91         [BCULR]         = 0x05b4,
 92         [MAHR]          = 0x05c0,
 93         [MALR]          = 0x05c8,
 94         [TROCR]         = 0x0700,
 95         [CDCR]          = 0x0708,
 96         [LCCR]          = 0x0710,
 97         [CEFCR]         = 0x0740,
 98         [FRECR]         = 0x0748,
 99         [TSFRCR]        = 0x0750,
100         [TLFRCR]        = 0x0758,
101         [RFCR]          = 0x0760,
102         [CERCR]         = 0x0768,
103         [CEECR]         = 0x0770,
104         [MAFCR]         = 0x0778,
105         [RMII_MII]      = 0x0790,
106 
107         [ARSTR]         = 0x0000,
108         [TSU_CTRST]     = 0x0004,
109         [TSU_FWEN0]     = 0x0010,
110         [TSU_FWEN1]     = 0x0014,
111         [TSU_FCM]       = 0x0018,
112         [TSU_BSYSL0]    = 0x0020,
113         [TSU_BSYSL1]    = 0x0024,
114         [TSU_PRISL0]    = 0x0028,
115         [TSU_PRISL1]    = 0x002c,
116         [TSU_FWSL0]     = 0x0030,
117         [TSU_FWSL1]     = 0x0034,
118         [TSU_FWSLC]     = 0x0038,
119         [TSU_QTAG0]     = 0x0040,
120         [TSU_QTAG1]     = 0x0044,
121         [TSU_FWSR]      = 0x0050,
122         [TSU_FWINMK]    = 0x0054,
123         [TSU_ADQT0]     = 0x0048,
124         [TSU_ADQT1]     = 0x004c,
125         [TSU_VTAG0]     = 0x0058,
126         [TSU_VTAG1]     = 0x005c,
127         [TSU_ADSBSY]    = 0x0060,
128         [TSU_TEN]       = 0x0064,
129         [TSU_POST1]     = 0x0070,
130         [TSU_POST2]     = 0x0074,
131         [TSU_POST3]     = 0x0078,
132         [TSU_POST4]     = 0x007c,
133         [TSU_ADRH0]     = 0x0100,
134         [TSU_ADRL0]     = 0x0104,
135         [TSU_ADRH31]    = 0x01f8,
136         [TSU_ADRL31]    = 0x01fc,
137 
138         [TXNLCR0]       = 0x0080,
139         [TXALCR0]       = 0x0084,
140         [RXNLCR0]       = 0x0088,
141         [RXALCR0]       = 0x008c,
142         [FWNLCR0]       = 0x0090,
143         [FWALCR0]       = 0x0094,
144         [TXNLCR1]       = 0x00a0,
 145         [TXALCR1]       = 0x00a4,
146         [RXNLCR1]       = 0x00a8,
147         [RXALCR1]       = 0x00ac,
148         [FWNLCR1]       = 0x00b0,
149         [FWALCR1]       = 0x00b4,
150 };
151 
152 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
153         [EDSR]          = 0x0000,
154         [EDMR]          = 0x0400,
155         [EDTRR]         = 0x0408,
156         [EDRRR]         = 0x0410,
157         [EESR]          = 0x0428,
158         [EESIPR]        = 0x0430,
159         [TDLAR]         = 0x0010,
160         [TDFAR]         = 0x0014,
161         [TDFXR]         = 0x0018,
162         [TDFFR]         = 0x001c,
163         [RDLAR]         = 0x0030,
164         [RDFAR]         = 0x0034,
165         [RDFXR]         = 0x0038,
166         [RDFFR]         = 0x003c,
167         [TRSCER]        = 0x0438,
168         [RMFCR]         = 0x0440,
169         [TFTR]          = 0x0448,
170         [FDR]           = 0x0450,
171         [RMCR]          = 0x0458,
172         [RPADIR]        = 0x0460,
173         [FCFTR]         = 0x0468,
174         [CSMR]          = 0x04E4,
175 
176         [ECMR]          = 0x0500,
177         [RFLR]          = 0x0508,
178         [ECSR]          = 0x0510,
179         [ECSIPR]        = 0x0518,
180         [PIR]           = 0x0520,
181         [APR]           = 0x0554,
182         [MPR]           = 0x0558,
183         [PFTCR]         = 0x055c,
184         [PFRCR]         = 0x0560,
185         [TPAUSER]       = 0x0564,
186         [MAHR]          = 0x05c0,
187         [MALR]          = 0x05c8,
188         [CEFCR]         = 0x0740,
189         [FRECR]         = 0x0748,
190         [TSFRCR]        = 0x0750,
191         [TLFRCR]        = 0x0758,
192         [RFCR]          = 0x0760,
193         [MAFCR]         = 0x0778,
194 
195         [ARSTR]         = 0x0000,
196         [TSU_CTRST]     = 0x0004,
197         [TSU_VTAG0]     = 0x0058,
198         [TSU_ADSBSY]    = 0x0060,
199         [TSU_TEN]       = 0x0064,
200         [TSU_ADRH0]     = 0x0100,
201         [TSU_ADRL0]     = 0x0104,
202         [TSU_ADRH31]    = 0x01f8,
203         [TSU_ADRL31]    = 0x01fc,
204 
205         [TXNLCR0]       = 0x0080,
206         [TXALCR0]       = 0x0084,
207         [RXNLCR0]       = 0x0088,
208         [RXALCR0]       = 0x008C,
209 };
210 
211 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
212         [ECMR]          = 0x0300,
213         [RFLR]          = 0x0308,
214         [ECSR]          = 0x0310,
215         [ECSIPR]        = 0x0318,
216         [PIR]           = 0x0320,
217         [PSR]           = 0x0328,
218         [RDMLR]         = 0x0340,
219         [IPGR]          = 0x0350,
220         [APR]           = 0x0354,
221         [MPR]           = 0x0358,
222         [RFCF]          = 0x0360,
223         [TPAUSER]       = 0x0364,
224         [TPAUSECR]      = 0x0368,
225         [MAHR]          = 0x03c0,
226         [MALR]          = 0x03c8,
227         [TROCR]         = 0x03d0,
228         [CDCR]          = 0x03d4,
229         [LCCR]          = 0x03d8,
230         [CNDCR]         = 0x03dc,
231         [CEFCR]         = 0x03e4,
232         [FRECR]         = 0x03e8,
233         [TSFRCR]        = 0x03ec,
234         [TLFRCR]        = 0x03f0,
235         [RFCR]          = 0x03f4,
236         [MAFCR]         = 0x03f8,
237 
238         [EDMR]          = 0x0200,
239         [EDTRR]         = 0x0208,
240         [EDRRR]         = 0x0210,
241         [TDLAR]         = 0x0218,
242         [RDLAR]         = 0x0220,
243         [EESR]          = 0x0228,
244         [EESIPR]        = 0x0230,
245         [TRSCER]        = 0x0238,
246         [RMFCR]         = 0x0240,
247         [TFTR]          = 0x0248,
248         [FDR]           = 0x0250,
249         [RMCR]          = 0x0258,
250         [TFUCR]         = 0x0264,
251         [RFOCR]         = 0x0268,
252         [RMIIMODE]      = 0x026c,
253         [FCFTR]         = 0x0270,
254         [TRIMD]         = 0x027c,
255 };
256 
257 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
258         [ECMR]          = 0x0100,
259         [RFLR]          = 0x0108,
260         [ECSR]          = 0x0110,
261         [ECSIPR]        = 0x0118,
262         [PIR]           = 0x0120,
263         [PSR]           = 0x0128,
264         [RDMLR]         = 0x0140,
265         [IPGR]          = 0x0150,
266         [APR]           = 0x0154,
267         [MPR]           = 0x0158,
268         [TPAUSER]       = 0x0164,
269         [RFCF]          = 0x0160,
270         [TPAUSECR]      = 0x0168,
271         [BCFRR]         = 0x016c,
272         [MAHR]          = 0x01c0,
273         [MALR]          = 0x01c8,
274         [TROCR]         = 0x01d0,
275         [CDCR]          = 0x01d4,
276         [LCCR]          = 0x01d8,
277         [CNDCR]         = 0x01dc,
278         [CEFCR]         = 0x01e4,
279         [FRECR]         = 0x01e8,
280         [TSFRCR]        = 0x01ec,
281         [TLFRCR]        = 0x01f0,
282         [RFCR]          = 0x01f4,
283         [MAFCR]         = 0x01f8,
284         [RTRATE]        = 0x01fc,
285 
286         [EDMR]          = 0x0000,
287         [EDTRR]         = 0x0008,
288         [EDRRR]         = 0x0010,
289         [TDLAR]         = 0x0018,
290         [RDLAR]         = 0x0020,
291         [EESR]          = 0x0028,
292         [EESIPR]        = 0x0030,
293         [TRSCER]        = 0x0038,
294         [RMFCR]         = 0x0040,
295         [TFTR]          = 0x0048,
296         [FDR]           = 0x0050,
297         [RMCR]          = 0x0058,
298         [TFUCR]         = 0x0064,
299         [RFOCR]         = 0x0068,
300         [FCFTR]         = 0x0070,
301         [RPADIR]        = 0x0078,
302         [TRIMD]         = 0x007c,
303         [RBWAR]         = 0x00c8,
304         [RDFAR]         = 0x00cc,
305         [TBRAR]         = 0x00d4,
306         [TDFAR]         = 0x00d8,
307 };
308 
309 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310         [EDMR]          = 0x0000,
311         [EDTRR]         = 0x0004,
312         [EDRRR]         = 0x0008,
313         [TDLAR]         = 0x000c,
314         [RDLAR]         = 0x0010,
315         [EESR]          = 0x0014,
316         [EESIPR]        = 0x0018,
317         [TRSCER]        = 0x001c,
318         [RMFCR]         = 0x0020,
319         [TFTR]          = 0x0024,
320         [FDR]           = 0x0028,
321         [RMCR]          = 0x002c,
322         [EDOCR]         = 0x0030,
323         [FCFTR]         = 0x0034,
324         [RPADIR]        = 0x0038,
325         [TRIMD]         = 0x003c,
326         [RBWAR]         = 0x0040,
327         [RDFAR]         = 0x0044,
328         [TBRAR]         = 0x004c,
329         [TDFAR]         = 0x0050,
330 
331         [ECMR]          = 0x0160,
332         [ECSR]          = 0x0164,
333         [ECSIPR]        = 0x0168,
334         [PIR]           = 0x016c,
335         [MAHR]          = 0x0170,
336         [MALR]          = 0x0174,
337         [RFLR]          = 0x0178,
338         [PSR]           = 0x017c,
339         [TROCR]         = 0x0180,
340         [CDCR]          = 0x0184,
341         [LCCR]          = 0x0188,
342         [CNDCR]         = 0x018c,
343         [CEFCR]         = 0x0194,
344         [FRECR]         = 0x0198,
345         [TSFRCR]        = 0x019c,
346         [TLFRCR]        = 0x01a0,
347         [RFCR]          = 0x01a4,
348         [MAFCR]         = 0x01a8,
349         [IPGR]          = 0x01b4,
350         [APR]           = 0x01b8,
351         [MPR]           = 0x01bc,
352         [TPAUSER]       = 0x01c4,
353         [BCFR]          = 0x01cc,
354 
355         [ARSTR]         = 0x0000,
356         [TSU_CTRST]     = 0x0004,
357         [TSU_FWEN0]     = 0x0010,
358         [TSU_FWEN1]     = 0x0014,
359         [TSU_FCM]       = 0x0018,
360         [TSU_BSYSL0]    = 0x0020,
361         [TSU_BSYSL1]    = 0x0024,
362         [TSU_PRISL0]    = 0x0028,
363         [TSU_PRISL1]    = 0x002c,
364         [TSU_FWSL0]     = 0x0030,
365         [TSU_FWSL1]     = 0x0034,
366         [TSU_FWSLC]     = 0x0038,
367         [TSU_QTAGM0]    = 0x0040,
368         [TSU_QTAGM1]    = 0x0044,
369         [TSU_ADQT0]     = 0x0048,
370         [TSU_ADQT1]     = 0x004c,
371         [TSU_FWSR]      = 0x0050,
372         [TSU_FWINMK]    = 0x0054,
373         [TSU_ADSBSY]    = 0x0060,
374         [TSU_TEN]       = 0x0064,
375         [TSU_POST1]     = 0x0070,
376         [TSU_POST2]     = 0x0074,
377         [TSU_POST3]     = 0x0078,
378         [TSU_POST4]     = 0x007c,
379 
380         [TXNLCR0]       = 0x0080,
381         [TXALCR0]       = 0x0084,
382         [RXNLCR0]       = 0x0088,
383         [RXALCR0]       = 0x008c,
384         [FWNLCR0]       = 0x0090,
385         [FWALCR0]       = 0x0094,
386         [TXNLCR1]       = 0x00a0,
 387         [TXALCR1]       = 0x00a4,
388         [RXNLCR1]       = 0x00a8,
389         [RXALCR1]       = 0x00ac,
390         [FWNLCR1]       = 0x00b0,
391         [FWALCR1]       = 0x00b4,
392 
393         [TSU_ADRH0]     = 0x0100,
394         [TSU_ADRL0]     = 0x0104,
395         [TSU_ADRL31]    = 0x01fc,
396 };
397 
398 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
399 {
400         return mdp->reg_offset == sh_eth_offset_gigabit;
401 }
402 
403 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
404 {
405         return mdp->reg_offset == sh_eth_offset_fast_rz;
406 }
407 
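     /* Select the PHY interface mode (GMII/MII/RMII) via the RMII_MII
      * register.
      */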
408 static void sh_eth_select_mii(struct net_device *ndev)
409 {
410         u32 value = 0x0;
411         struct sh_eth_private *mdp = netdev_priv(ndev);
412 
413         switch (mdp->phy_interface) {
414         case PHY_INTERFACE_MODE_GMII:
415                 value = 0x2;
416                 break;
417         case PHY_INTERFACE_MODE_MII:
418                 value = 0x1;
419                 break;
420         case PHY_INTERFACE_MODE_RMII:
421                 value = 0x0;
422                 break;
423         default:
424                 netdev_warn(ndev,
425                             "PHY interface mode was not setup. Set to MII.\n");
426                 value = 0x1;
427                 break;
428         }
429 
430         sh_eth_write(ndev, value, RMII_MII);
431 }
432 
433 static void sh_eth_set_duplex(struct net_device *ndev)
434 {
435         struct sh_eth_private *mdp = netdev_priv(ndev);
436 
437         if (mdp->duplex) /* Full */
438                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
439         else            /* Half */
440                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
441 }
442 
 443 /* CPU-dependent code starts here */
444 static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
445 {
446         struct sh_eth_private *mdp = netdev_priv(ndev);
447 
448         switch (mdp->speed) {
449         case 10: /* 10BASE */
450                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
451                 break;
452         case 100:/* 100BASE */
453                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
454                 break;
455         default:
456                 break;
457         }
458 }
459 
460 /* R8A7778/9 */
461 static struct sh_eth_cpu_data r8a777x_data = {
462         .set_duplex     = sh_eth_set_duplex,
463         .set_rate       = sh_eth_set_rate_r8a777x,
464 
465         .register_type  = SH_ETH_REG_FAST_RCAR,
466 
467         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
468         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
469         .eesipr_value   = 0x01ff009f,
470 
471         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
472         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
473                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
474                           EESR_ECI,
475 
476         .apr            = 1,
477         .mpr            = 1,
478         .tpauser        = 1,
479         .hw_swap        = 1,
480 };
481 
482 /* R8A7790/1 */
483 static struct sh_eth_cpu_data r8a779x_data = {
484         .set_duplex     = sh_eth_set_duplex,
485         .set_rate       = sh_eth_set_rate_r8a777x,
486 
487         .register_type  = SH_ETH_REG_FAST_RCAR,
488 
489         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
490         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
491         .eesipr_value   = 0x01ff009f,
492 
493         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
494         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
495                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
496                           EESR_ECI,
497 
498         .apr            = 1,
499         .mpr            = 1,
500         .tpauser        = 1,
501         .hw_swap        = 1,
502         .rmiimode       = 1,
503         .shift_rd0      = 1,
504 };
505 
506 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
507 {
508         struct sh_eth_private *mdp = netdev_priv(ndev);
509 
510         switch (mdp->speed) {
511         case 10: /* 10BASE */
512                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
513                 break;
514         case 100:/* 100BASE */
515                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
516                 break;
517         default:
518                 break;
519         }
520 }
521 
522 /* SH7724 */
523 static struct sh_eth_cpu_data sh7724_data = {
524         .set_duplex     = sh_eth_set_duplex,
525         .set_rate       = sh_eth_set_rate_sh7724,
526 
527         .register_type  = SH_ETH_REG_FAST_SH4,
528 
529         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
530         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
531         .eesipr_value   = 0x01ff009f,
532 
533         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
534         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
535                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
536                           EESR_ECI,
537 
538         .apr            = 1,
539         .mpr            = 1,
540         .tpauser        = 1,
541         .hw_swap        = 1,
542         .rpadir         = 1,
543         .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
544 };
545 
546 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
547 {
548         struct sh_eth_private *mdp = netdev_priv(ndev);
549 
550         switch (mdp->speed) {
551         case 10: /* 10BASE */
552                 sh_eth_write(ndev, 0, RTRATE);
553                 break;
554         case 100:/* 100BASE */
555                 sh_eth_write(ndev, 1, RTRATE);
556                 break;
557         default:
558                 break;
559         }
560 }
561 
562 /* SH7757 */
563 static struct sh_eth_cpu_data sh7757_data = {
564         .set_duplex     = sh_eth_set_duplex,
565         .set_rate       = sh_eth_set_rate_sh7757,
566 
567         .register_type  = SH_ETH_REG_FAST_SH4,
568 
569         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
570 
571         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
572         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
573                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
574                           EESR_ECI,
575 
576         .irq_flags      = IRQF_SHARED,
577         .apr            = 1,
578         .mpr            = 1,
579         .tpauser        = 1,
580         .hw_swap        = 1,
581         .no_ade         = 1,
582         .rpadir         = 1,
583         .rpadir_value   = 2 << 16,
584 };
585 
586 #define SH_GIGA_ETH_BASE        0xfee00000UL
587 #define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
588 #define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
589 static void sh_eth_chip_reset_giga(struct net_device *ndev)
590 {
591         int i;
592         unsigned long mahr[2], malr[2];
593 
594         /* save MAHR and MALR */
595         for (i = 0; i < 2; i++) {
596                 malr[i] = ioread32((void *)GIGA_MALR(i));
597                 mahr[i] = ioread32((void *)GIGA_MAHR(i));
598         }
599 
600         /* reset device */
601         iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
602         mdelay(1);
603 
604         /* restore MAHR and MALR */
605         for (i = 0; i < 2; i++) {
606                 iowrite32(malr[i], (void *)GIGA_MALR(i));
607                 iowrite32(mahr[i], (void *)GIGA_MAHR(i));
608         }
609 }
610 
611 static void sh_eth_set_rate_giga(struct net_device *ndev)
612 {
613         struct sh_eth_private *mdp = netdev_priv(ndev);
614 
615         switch (mdp->speed) {
616         case 10: /* 10BASE */
617                 sh_eth_write(ndev, 0x00000000, GECMR);
618                 break;
619         case 100:/* 100BASE */
620                 sh_eth_write(ndev, 0x00000010, GECMR);
621                 break;
622         case 1000: /* 1000BASE */
623                 sh_eth_write(ndev, 0x00000020, GECMR);
624                 break;
625         default:
626                 break;
627         }
628 }
629 
630 /* SH7757(GETHERC) */
631 static struct sh_eth_cpu_data sh7757_data_giga = {
632         .chip_reset     = sh_eth_chip_reset_giga,
633         .set_duplex     = sh_eth_set_duplex,
634         .set_rate       = sh_eth_set_rate_giga,
635 
636         .register_type  = SH_ETH_REG_GIGABIT,
637 
638         .ecsr_value     = ECSR_ICD | ECSR_MPD,
639         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
640         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
641 
642         .tx_check       = EESR_TC1 | EESR_FTC,
643         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
644                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
645                           EESR_TDE | EESR_ECI,
646         .fdr_value      = 0x0000072f,
647 
648         .irq_flags      = IRQF_SHARED,
649         .apr            = 1,
650         .mpr            = 1,
651         .tpauser        = 1,
652         .bculr          = 1,
653         .hw_swap        = 1,
654         .rpadir         = 1,
655         .rpadir_value   = 2 << 16,
656         .no_trimd       = 1,
657         .no_ade         = 1,
658         .tsu            = 1,
659 };
660 
661 static void sh_eth_chip_reset(struct net_device *ndev)
662 {
663         struct sh_eth_private *mdp = netdev_priv(ndev);
664 
665         /* reset device */
666         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
667         mdelay(1);
668 }
669 
670 static void sh_eth_set_rate_gether(struct net_device *ndev)
671 {
672         struct sh_eth_private *mdp = netdev_priv(ndev);
673 
674         switch (mdp->speed) {
675         case 10: /* 10BASE */
676                 sh_eth_write(ndev, GECMR_10, GECMR);
677                 break;
678         case 100:/* 100BASE */
679                 sh_eth_write(ndev, GECMR_100, GECMR);
680                 break;
681         case 1000: /* 1000BASE */
682                 sh_eth_write(ndev, GECMR_1000, GECMR);
683                 break;
684         default:
685                 break;
686         }
687 }
688 
689 /* SH7734 */
690 static struct sh_eth_cpu_data sh7734_data = {
691         .chip_reset     = sh_eth_chip_reset,
692         .set_duplex     = sh_eth_set_duplex,
693         .set_rate       = sh_eth_set_rate_gether,
694 
695         .register_type  = SH_ETH_REG_GIGABIT,
696 
697         .ecsr_value     = ECSR_ICD | ECSR_MPD,
698         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
699         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
700 
701         .tx_check       = EESR_TC1 | EESR_FTC,
702         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
703                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
704                           EESR_TDE | EESR_ECI,
705 
706         .apr            = 1,
707         .mpr            = 1,
708         .tpauser        = 1,
709         .bculr          = 1,
710         .hw_swap        = 1,
711         .no_trimd       = 1,
712         .no_ade         = 1,
713         .tsu            = 1,
714         .hw_crc         = 1,
715         .select_mii     = 1,
716 };
717 
718 /* SH7763 */
719 static struct sh_eth_cpu_data sh7763_data = {
720         .chip_reset     = sh_eth_chip_reset,
721         .set_duplex     = sh_eth_set_duplex,
722         .set_rate       = sh_eth_set_rate_gether,
723 
724         .register_type  = SH_ETH_REG_GIGABIT,
725 
726         .ecsr_value     = ECSR_ICD | ECSR_MPD,
727         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
728         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
729 
730         .tx_check       = EESR_TC1 | EESR_FTC,
731         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
732                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
733                           EESR_ECI,
734 
735         .apr            = 1,
736         .mpr            = 1,
737         .tpauser        = 1,
738         .bculr          = 1,
739         .hw_swap        = 1,
740         .no_trimd       = 1,
741         .no_ade         = 1,
742         .tsu            = 1,
743         .irq_flags      = IRQF_SHARED,
744 };
745 
746 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
747 {
748         struct sh_eth_private *mdp = netdev_priv(ndev);
749 
750         /* reset device */
751         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
752         mdelay(1);
753 
754         sh_eth_select_mii(ndev);
755 }
756 
757 /* R8A7740 */
758 static struct sh_eth_cpu_data r8a7740_data = {
759         .chip_reset     = sh_eth_chip_reset_r8a7740,
760         .set_duplex     = sh_eth_set_duplex,
761         .set_rate       = sh_eth_set_rate_gether,
762 
763         .register_type  = SH_ETH_REG_GIGABIT,
764 
765         .ecsr_value     = ECSR_ICD | ECSR_MPD,
766         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
767         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
768 
769         .tx_check       = EESR_TC1 | EESR_FTC,
770         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
771                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
772                           EESR_TDE | EESR_ECI,
773         .fdr_value      = 0x0000070f,
774 
775         .apr            = 1,
776         .mpr            = 1,
777         .tpauser        = 1,
778         .bculr          = 1,
779         .hw_swap        = 1,
780         .rpadir         = 1,
781         .rpadir_value   = 2 << 16,
782         .no_trimd       = 1,
783         .no_ade         = 1,
784         .tsu            = 1,
785         .select_mii     = 1,
786         .shift_rd0      = 1,
787 };
788 
789 /* R7S72100 */
790 static struct sh_eth_cpu_data r7s72100_data = {
791         .chip_reset     = sh_eth_chip_reset,
792         .set_duplex     = sh_eth_set_duplex,
793 
794         .register_type  = SH_ETH_REG_FAST_RZ,
795 
796         .ecsr_value     = ECSR_ICD,
797         .ecsipr_value   = ECSIPR_ICDIP,
798         .eesipr_value   = 0xff7f009f,
799 
800         .tx_check       = EESR_TC1 | EESR_FTC,
801         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
802                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
803                           EESR_TDE | EESR_ECI,
804         .fdr_value      = 0x0000070f,
805 
806         .no_psr         = 1,
807         .apr            = 1,
808         .mpr            = 1,
809         .tpauser        = 1,
810         .hw_swap        = 1,
811         .rpadir         = 1,
812         .rpadir_value   = 2 << 16,
813         .no_trimd       = 1,
814         .no_ade         = 1,
815         .hw_crc         = 1,
816         .tsu            = 1,
817         .shift_rd0      = 1,
818 };
819 
820 static struct sh_eth_cpu_data sh7619_data = {
821         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
822 
823         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
824 
825         .apr            = 1,
826         .mpr            = 1,
827         .tpauser        = 1,
828         .hw_swap        = 1,
829 };
830 
831 static struct sh_eth_cpu_data sh771x_data = {
832         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
833 
834         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
835         .tsu            = 1,
836 };
837 
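     /* Fill in fallback values for any fields the per-SoC cpu_data left
      * at zero.
      */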
838 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
839 {
840         if (!cd->ecsr_value)
841                 cd->ecsr_value = DEFAULT_ECSR_INIT;
842 
843         if (!cd->ecsipr_value)
844                 cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
845 
846         if (!cd->fcftr_value)
847                 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
848                                   DEFAULT_FIFO_F_D_RFD;
849 
850         if (!cd->fdr_value)
851                 cd->fdr_value = DEFAULT_FDR_INIT;
852 
853         if (!cd->tx_check)
854                 cd->tx_check = DEFAULT_TX_CHECK;
855 
856         if (!cd->eesr_err_check)
857                 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
858 }
859 
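     /* Poll for up to 100 ms until the software-reset bits in EDMR
      * self-clear.
      */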
860 static int sh_eth_check_reset(struct net_device *ndev)
861 {
862         int ret = 0;
863         int cnt = 100;
864 
865         while (cnt > 0) {
866                 if (!(sh_eth_read(ndev, EDMR) & 0x3))
867                         break;
868                 mdelay(1);
869                 cnt--;
870         }
871         if (cnt <= 0) {
872                 netdev_err(ndev, "Device reset failed\n");
873                 ret = -ETIMEDOUT;
874         }
875         return ret;
876 }
877 
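     /* Software-reset the controller.  GETHER/RZ cores use a self-clearing
      * reset and need their descriptor pointer registers re-initialized
      * afterwards; fast Ether cores just have the reset bit pulsed briefly.
      */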
878 static int sh_eth_reset(struct net_device *ndev)
879 {
880         struct sh_eth_private *mdp = netdev_priv(ndev);
881         int ret = 0;
882 
883         if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
884                 sh_eth_write(ndev, EDSR_ENALL, EDSR);
885                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
886                              EDMR);
887 
888                 ret = sh_eth_check_reset(ndev);
889                 if (ret)
890                         return ret;
891 
892                 /* Table Init */
893                 sh_eth_write(ndev, 0x0, TDLAR);
894                 sh_eth_write(ndev, 0x0, TDFAR);
895                 sh_eth_write(ndev, 0x0, TDFXR);
896                 sh_eth_write(ndev, 0x0, TDFFR);
897                 sh_eth_write(ndev, 0x0, RDLAR);
898                 sh_eth_write(ndev, 0x0, RDFAR);
899                 sh_eth_write(ndev, 0x0, RDFXR);
900                 sh_eth_write(ndev, 0x0, RDFFR);
901 
902                 /* Reset HW CRC register */
903                 if (mdp->cd->hw_crc)
904                         sh_eth_write(ndev, 0x0, CSMR);
905 
906                 /* Select MII mode */
907                 if (mdp->cd->select_mii)
908                         sh_eth_select_mii(ndev);
909         } else {
910                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
911                              EDMR);
912                 mdelay(3);
913                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
914                              EDMR);
915         }
916 
917         return ret;
918 }
919 
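     /* Reserve skb headroom so that the Rx DMA buffer starts at the
      * alignment the EDMAC expects on this CPU family.
      */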
920 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
921 static void sh_eth_set_receive_align(struct sk_buff *skb)
922 {
923         int reserve;
924 
925         reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
926         if (reserve)
927                 skb_reserve(skb, reserve);
928 }
929 #else
930 static void sh_eth_set_receive_align(struct sk_buff *skb)
931 {
932         skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
933 }
934 #endif
935 
936 
937 /* CPU <-> EDMAC endian convert */
938 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
939 {
940         switch (mdp->edmac_endian) {
941         case EDMAC_LITTLE_ENDIAN:
942                 return cpu_to_le32(x);
943         case EDMAC_BIG_ENDIAN:
944                 return cpu_to_be32(x);
945         }
946         return x;
947 }
948 
949 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
950 {
951         switch (mdp->edmac_endian) {
952         case EDMAC_LITTLE_ENDIAN:
953                 return le32_to_cpu(x);
954         case EDMAC_BIG_ENDIAN:
955                 return be32_to_cpu(x);
956         }
957         return x;
958 }
959 
960 /* Program the hardware MAC address from dev->dev_addr. */
961 static void update_mac_address(struct net_device *ndev)
962 {
963         sh_eth_write(ndev,
964                      (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
965                      (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
966         sh_eth_write(ndev,
967                      (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
968 }
969 
970 /* Get MAC address from SuperH MAC address register
971  *
 972  * The SuperH Ethernet controller has no ROM that holds the MAC address.
 973  * The driver therefore reads back the address programmed by the
 974  * bootloader (U-Boot or sh-ipl+g), so the bootloader must set it first.
975  *
976  */
977 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
978 {
979         if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
980                 memcpy(ndev->dev_addr, mac, ETH_ALEN);
981         } else {
982                 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
983                 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
984                 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
985                 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
986                 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
987                 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
988         }
989 }
990 
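     /* The EDTRR transmit-request value differs between GETHER/RZ and
      * fast Ether cores.
      */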
991 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
992 {
993         if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
994                 return EDTRR_TRNS_GETHER;
995         else
996                 return EDTRR_TRNS_ETHER;
997 }
998 
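     /* Bit-banged MDIO: the masks below select the MDC, MDIO-direction,
      * MDIO-out and MDIO-in bits in the PHY interface register (PIR), which
      * the mdio-bitbang callbacks in bb_ops toggle one bit at a time.
      */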
999 struct bb_info {
1000         void (*set_gate)(void *addr);
1001         struct mdiobb_ctrl ctrl;
1002         void *addr;
1003         u32 mmd_msk;/* MMD */
1004         u32 mdo_msk;
1005         u32 mdi_msk;
1006         u32 mdc_msk;
1007 };
1008 
1009 /* PHY bit set */
1010 static void bb_set(void *addr, u32 msk)
1011 {
1012         iowrite32(ioread32(addr) | msk, addr);
1013 }
1014 
1015 /* PHY bit clear */
1016 static void bb_clr(void *addr, u32 msk)
1017 {
1018         iowrite32((ioread32(addr) & ~msk), addr);
1019 }
1020 
1021 /* PHY bit read */
1022 static int bb_read(void *addr, u32 msk)
1023 {
1024         return (ioread32(addr) & msk) != 0;
1025 }
1026 
1027 /* Data I/O pin control */
1028 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1029 {
1030         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1031 
1032         if (bitbang->set_gate)
1033                 bitbang->set_gate(bitbang->addr);
1034 
1035         if (bit)
1036                 bb_set(bitbang->addr, bitbang->mmd_msk);
1037         else
1038                 bb_clr(bitbang->addr, bitbang->mmd_msk);
1039 }
1040 
1041 /* Set bit data */
1042 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1043 {
1044         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1045 
1046         if (bitbang->set_gate)
1047                 bitbang->set_gate(bitbang->addr);
1048 
1049         if (bit)
1050                 bb_set(bitbang->addr, bitbang->mdo_msk);
1051         else
1052                 bb_clr(bitbang->addr, bitbang->mdo_msk);
1053 }
1054 
1055 /* Get bit data */
1056 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1057 {
1058         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1059 
1060         if (bitbang->set_gate)
1061                 bitbang->set_gate(bitbang->addr);
1062 
1063         return bb_read(bitbang->addr, bitbang->mdi_msk);
1064 }
1065 
1066 /* MDC pin control */
1067 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1068 {
1069         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1070 
1071         if (bitbang->set_gate)
1072                 bitbang->set_gate(bitbang->addr);
1073 
1074         if (bit)
1075                 bb_set(bitbang->addr, bitbang->mdc_msk);
1076         else
1077                 bb_clr(bitbang->addr, bitbang->mdc_msk);
1078 }
1079 
1080 /* mdio bus control struct */
1081 static struct mdiobb_ops bb_ops = {
1082         .owner = THIS_MODULE,
1083         .set_mdc = sh_mdc_ctrl,
1084         .set_mdio_dir = sh_mmd_ctrl,
1085         .set_mdio_data = sh_set_mdio,
1086         .get_mdio_data = sh_get_mdio,
1087 };
1088 
1089 /* Free the Rx and Tx skb ring buffers */
1090 static void sh_eth_ring_free(struct net_device *ndev)
1091 {
1092         struct sh_eth_private *mdp = netdev_priv(ndev);
1093         int i;
1094 
1095         /* Free Rx skb ringbuffer */
1096         if (mdp->rx_skbuff) {
1097                 for (i = 0; i < mdp->num_rx_ring; i++) {
1098                         if (mdp->rx_skbuff[i])
1099                                 dev_kfree_skb(mdp->rx_skbuff[i]);
1100                 }
1101         }
1102         kfree(mdp->rx_skbuff);
1103         mdp->rx_skbuff = NULL;
1104 
1105         /* Free Tx skb ringbuffer */
1106         if (mdp->tx_skbuff) {
1107                 for (i = 0; i < mdp->num_tx_ring; i++) {
1108                         if (mdp->tx_skbuff[i])
1109                                 dev_kfree_skb(mdp->tx_skbuff[i]);
1110                 }
1111         }
1112         kfree(mdp->tx_skbuff);
1113         mdp->tx_skbuff = NULL;
1114 }
1115 
1116 /* Initialize the Rx/Tx descriptor rings and pre-allocate the Rx skbs */
1117 static void sh_eth_ring_format(struct net_device *ndev)
1118 {
1119         struct sh_eth_private *mdp = netdev_priv(ndev);
1120         int i;
1121         struct sk_buff *skb;
1122         struct sh_eth_rxdesc *rxdesc = NULL;
1123         struct sh_eth_txdesc *txdesc = NULL;
1124         int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1125         int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1126 
1127         mdp->cur_rx = 0;
1128         mdp->cur_tx = 0;
1129         mdp->dirty_rx = 0;
1130         mdp->dirty_tx = 0;
1131 
1132         memset(mdp->rx_ring, 0, rx_ringsize);
1133 
1134         /* build Rx ring buffer */
1135         for (i = 0; i < mdp->num_rx_ring; i++) {
1136                 /* skb */
1137                 mdp->rx_skbuff[i] = NULL;
1138                 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1139                 mdp->rx_skbuff[i] = skb;
1140                 if (skb == NULL)
1141                         break;
1142                 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1143                                DMA_FROM_DEVICE);
1144                 sh_eth_set_receive_align(skb);
1145 
1146                 /* RX descriptor */
1147                 rxdesc = &mdp->rx_ring[i];
1148                 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1149                 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1150 
1151                 /* Round the buffer length up to a 16-byte boundary. */
1152                 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1153                 /* Rx descriptor address set */
1154                 if (i == 0) {
1155                         sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1156                         if (sh_eth_is_gether(mdp) ||
1157                             sh_eth_is_rz_fast_ether(mdp))
1158                                 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1159                 }
1160         }
1161 
1162         mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1163 
1164         /* Mark the last entry as wrapping the ring. */
1165         rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
1166 
1167         memset(mdp->tx_ring, 0, tx_ringsize);
1168 
1169         /* build Tx ring buffer */
1170         for (i = 0; i < mdp->num_tx_ring; i++) {
1171                 mdp->tx_skbuff[i] = NULL;
1172                 txdesc = &mdp->tx_ring[i];
1173                 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1174                 txdesc->buffer_length = 0;
1175                 if (i == 0) {
1176                         /* Tx descriptor address set */
1177                         sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1178                         if (sh_eth_is_gether(mdp) ||
1179                             sh_eth_is_rz_fast_ether(mdp))
1180                                 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1181                 }
1182         }
1183 
1184         txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1185 }
1186 
1187 /* Allocate the Rx/Tx skb arrays and the DMA descriptor rings */
1188 static int sh_eth_ring_init(struct net_device *ndev)
1189 {
1190         struct sh_eth_private *mdp = netdev_priv(ndev);
1191         int rx_ringsize, tx_ringsize, ret = 0;
1192 
1193         /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1194          * card needs room to do 8 byte alignment, +2 so we can reserve
1195          * the first 2 bytes, and +16 gets room for the status word from the
1196          * card.
1197          */
1198         mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1199                           (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1200         if (mdp->cd->rpadir)
1201                 mdp->rx_buf_sz += NET_IP_ALIGN;
1202 
1203         /* Allocate RX and TX skb rings */
1204         mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
1205                                        sizeof(*mdp->rx_skbuff), GFP_KERNEL);
1206         if (!mdp->rx_skbuff) {
1207                 ret = -ENOMEM;
1208                 return ret;
1209         }
1210 
1211         mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
1212                                        sizeof(*mdp->tx_skbuff), GFP_KERNEL);
1213         if (!mdp->tx_skbuff) {
1214                 ret = -ENOMEM;
1215                 goto skb_ring_free;
1216         }
1217 
1218         /* Allocate all Rx descriptors. */
1219         rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1220         mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1221                                           GFP_KERNEL);
1222         if (!mdp->rx_ring) {
1223                 ret = -ENOMEM;
1224                 goto desc_ring_free;
1225         }
1226 
1227         mdp->dirty_rx = 0;
1228 
1229         /* Allocate all Tx descriptors. */
1230         tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1231         mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1232                                           GFP_KERNEL);
1233         if (!mdp->tx_ring) {
1234                 ret = -ENOMEM;
1235                 goto desc_ring_free;
1236         }
1237         return ret;
1238 
1239 desc_ring_free:
1240         /* free DMA buffer */
1241         dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1242 
1243 skb_ring_free:
1244         /* Free Rx and Tx skb ring buffer */
1245         sh_eth_ring_free(ndev);
1246         mdp->tx_ring = NULL;
1247         mdp->rx_ring = NULL;
1248 
1249         return ret;
1250 }
1251 
1252 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
1253 {
1254         int ringsize;
1255 
1256         if (mdp->rx_ring) {
1257                 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1258                 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1259                                   mdp->rx_desc_dma);
1260                 mdp->rx_ring = NULL;
1261         }
1262 
1263         if (mdp->tx_ring) {
1264                 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1265                 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1266                                   mdp->tx_desc_dma);
1267                 mdp->tx_ring = NULL;
1268         }
1269 }
1270 
1271 static int sh_eth_dev_init(struct net_device *ndev, bool start)
1272 {
1273         int ret = 0;
1274         struct sh_eth_private *mdp = netdev_priv(ndev);
1275         u32 val;
1276 
1277         /* Soft Reset */
1278         ret = sh_eth_reset(ndev);
1279         if (ret)
1280                 return ret;
1281 
1282         if (mdp->cd->rmiimode)
1283                 sh_eth_write(ndev, 0x1, RMIIMODE);
1284 
1285         /* Descriptor format */
1286         sh_eth_ring_format(ndev);
1287         if (mdp->cd->rpadir)
1288                 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1289 
1290         /* all sh_eth int mask */
1291         sh_eth_write(ndev, 0, EESIPR);
1292 
1293 #if defined(__LITTLE_ENDIAN)
1294         if (mdp->cd->hw_swap)
1295                 sh_eth_write(ndev, EDMR_EL, EDMR);
1296         else
1297 #endif
1298                 sh_eth_write(ndev, 0, EDMR);
1299 
1300         /* FIFO size set */
1301         sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1302         sh_eth_write(ndev, 0, TFTR);
1303 
1304         /* Frame recv control (enable multiple-packets per rx irq) */
1305         sh_eth_write(ndev, RMCR_RNC, RMCR);
1306 
1307         sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
1308 
1309         if (mdp->cd->bculr)
1310                 sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
1311 
1312         sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1313 
1314         if (!mdp->cd->no_trimd)
1315                 sh_eth_write(ndev, 0, TRIMD);
1316 
1317         /* Recv frame limit set register */
1318         sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1319                      RFLR);
1320 
1321         sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1322         if (start)
1323                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1324 
1325         /* PAUSE Prohibition */
1326         val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1327                 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1328 
1329         sh_eth_write(ndev, val, ECMR);
1330 
1331         if (mdp->cd->set_rate)
1332                 mdp->cd->set_rate(ndev);
1333 
1334         /* E-MAC Status Register clear */
1335         sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1336 
1337         /* E-MAC Interrupt Enable register */
1338         if (start)
1339                 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1340 
1341         /* Set MAC address */
1342         update_mac_address(ndev);
1343 
1344         /* mask reset */
1345         if (mdp->cd->apr)
1346                 sh_eth_write(ndev, APR_AP, APR);
1347         if (mdp->cd->mpr)
1348                 sh_eth_write(ndev, MPR_MP, MPR);
1349         if (mdp->cd->tpauser)
1350                 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1351 
1352         if (start) {
1353                 /* Setting the Rx mode will start the Rx process. */
1354                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1355 
1356                 netif_start_queue(ndev);
1357         }
1358 
1359         return ret;
1360 }
1361 
1362 /* Reclaim completed Tx descriptors and free their skbs */
1363 static int sh_eth_txfree(struct net_device *ndev)
1364 {
1365         struct sh_eth_private *mdp = netdev_priv(ndev);
1366         struct sh_eth_txdesc *txdesc;
1367         int free_num = 0;
1368         int entry = 0;
1369 
1370         for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1371                 entry = mdp->dirty_tx % mdp->num_tx_ring;
1372                 txdesc = &mdp->tx_ring[entry];
1373                 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1374                         break;
1375                 /* Free the original skb. */
1376                 if (mdp->tx_skbuff[entry]) {
1377                         dma_unmap_single(&ndev->dev, txdesc->addr,
1378                                          txdesc->buffer_length, DMA_TO_DEVICE);
1379                         dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1380                         mdp->tx_skbuff[entry] = NULL;
1381                         free_num++;
1382                 }
1383                 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1384                 if (entry >= mdp->num_tx_ring - 1)
1385                         txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1386 
1387                 ndev->stats.tx_packets++;
1388                 ndev->stats.tx_bytes += txdesc->buffer_length;
1389         }
1390         return free_num;
1391 }
1392 
1393 /* Packet receive function */
1394 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1395 {
1396         struct sh_eth_private *mdp = netdev_priv(ndev);
1397         struct sh_eth_rxdesc *rxdesc;
1398 
1399         int entry = mdp->cur_rx % mdp->num_rx_ring;
1400         int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1401         struct sk_buff *skb;
1402         u16 pkt_len = 0;
1403         u32 desc_status;
1404 
1405         rxdesc = &mdp->rx_ring[entry];
1406         while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1407                 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1408                 pkt_len = rxdesc->frame_length;
1409 
1410                 if (--boguscnt < 0)
1411                         break;
1412 
1413                 if (*quota <= 0)
1414                         break;
1415 
1416                 (*quota)--;
1417 
1418                 if (!(desc_status & RDFEND))
1419                         ndev->stats.rx_length_errors++;
1420 
1421                 /* In case of almost all GETHER/ETHERs, the Receive Frame State
1422                  * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1423                  * bit 0. However, in case of the R8A7740, R8A779x, and
1424                  * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
1425                  * driver needs right shifting by 16.
1426                  */
1427                 if (mdp->cd->shift_rd0)
1428                         desc_status >>= 16;
1429 
1430                 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1431                                    RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1432                         ndev->stats.rx_errors++;
1433                         if (desc_status & RD_RFS1)
1434                                 ndev->stats.rx_crc_errors++;
1435                         if (desc_status & RD_RFS2)
1436                                 ndev->stats.rx_frame_errors++;
1437                         if (desc_status & RD_RFS3)
1438                                 ndev->stats.rx_length_errors++;
1439                         if (desc_status & RD_RFS4)
1440                                 ndev->stats.rx_length_errors++;
1441                         if (desc_status & RD_RFS6)
1442                                 ndev->stats.rx_missed_errors++;
1443                         if (desc_status & RD_RFS10)
1444                                 ndev->stats.rx_over_errors++;
1445                 } else {
1446                         if (!mdp->cd->hw_swap)
1447                                 sh_eth_soft_swap(
1448                                         phys_to_virt(ALIGN(rxdesc->addr, 4)),
1449                                         pkt_len + 2);
1450                         skb = mdp->rx_skbuff[entry];
1451                         mdp->rx_skbuff[entry] = NULL;
1452                         if (mdp->cd->rpadir)
1453                                 skb_reserve(skb, NET_IP_ALIGN);
1454                         dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1455                                                 mdp->rx_buf_sz,
1456                                                 DMA_FROM_DEVICE);
1457                         skb_put(skb, pkt_len);
1458                         skb->protocol = eth_type_trans(skb, ndev);
1459                         netif_receive_skb(skb);
1460                         ndev->stats.rx_packets++;
1461                         ndev->stats.rx_bytes += pkt_len;
1462                 }
1463                 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1464                 rxdesc = &mdp->rx_ring[entry];
1465         }
1466 
1467         /* Refill the Rx ring buffers. */
1468         for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1469                 entry = mdp->dirty_rx % mdp->num_rx_ring;
1470                 rxdesc = &mdp->rx_ring[entry];
1471                 /* Round the buffer length up to a 16-byte boundary. */
1472                 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1473 
1474                 if (mdp->rx_skbuff[entry] == NULL) {
1475                         skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1476                         mdp->rx_skbuff[entry] = skb;
1477                         if (skb == NULL)
1478                                 break;  /* Better luck next round. */
1479                         dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1480                                        DMA_FROM_DEVICE);
1481                         sh_eth_set_receive_align(skb);
1482 
1483                         skb_checksum_none_assert(skb);
1484                         rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1485                 }
1486                 if (entry >= mdp->num_rx_ring - 1)
1487                         rxdesc->status |=
1488                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1489                 else
1490                         rxdesc->status |=
1491                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1492         }
1493 
1494         /* Restart Rx engine if stopped. */
1495         /* If we don't need to check status, don't. -KDU */
1496         if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1497                 /* fix the values for the next receiving if RDE is set */
1498                 if (intr_status & EESR_RDE) {
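                             /* RDFAR - RDLAR is the byte offset of the
                              * descriptor the DMAC stopped at; descriptors
                              * are 16 bytes, so >> 4 gives a ring index.
                              */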
1499                         u32 count = (sh_eth_read(ndev, RDFAR) -
1500                                      sh_eth_read(ndev, RDLAR)) >> 4;
1501 
1502                         mdp->cur_rx = count;
1503                         mdp->dirty_rx = count;
1504                 }
1505                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1506         }
1507 
1508         return *quota <= 0;
1509 }
1510 
1511 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1512 {
1513         /* disable tx and rx */
1514         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1515                 ~(ECMR_RE | ECMR_TE), ECMR);
1516 }
1517 
1518 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1519 {
1520         /* enable tx and rx */
1521         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1522                 (ECMR_RE | ECMR_TE), ECMR);
1523 }
1524 
1525 /* error control function */
1526 static void sh_eth_error(struct net_device *ndev, int intr_status)
1527 {
1528         struct sh_eth_private *mdp = netdev_priv(ndev);
1529         u32 felic_stat;
1530         u32 link_stat;
1531         u32 mask;
1532 
1533         if (intr_status & EESR_ECI) {
1534                 felic_stat = sh_eth_read(ndev, ECSR);
1535                 sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
1536                 if (felic_stat & ECSR_ICD)
1537                         ndev->stats.tx_carrier_errors++;
1538                 if (felic_stat & ECSR_LCHNG) {
1539                         /* Link Changed */
1540                         if (mdp->cd->no_psr || mdp->no_ether_link) {
1541                                 goto ignore_link;
1542                         } else {
1543                                 link_stat = (sh_eth_read(ndev, PSR));
1544                                 if (mdp->ether_link_active_low)
1545                                         link_stat = ~link_stat;
1546                         }
1547                         if (!(link_stat & PHY_ST_LINK)) {
1548                                 sh_eth_rcv_snd_disable(ndev);
1549                         } else {
1550                                 /* Link Up */
1551                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1552                                                    ~DMAC_M_ECI, EESIPR);
1553                                 /* clear int */
1554                                 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1555                                              ECSR);
1556                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1557                                                    DMAC_M_ECI, EESIPR);
1558                                 /* enable tx and rx */
1559                                 sh_eth_rcv_snd_enable(ndev);
1560                         }
1561                 }
1562         }
1563 
1564 ignore_link:
1565         if (intr_status & EESR_TWB) {
1566                 /* Unused write back interrupt */
1567                 if (intr_status & EESR_TABT) {  /* Transmit Abort int */
1568                         ndev->stats.tx_aborted_errors++;
1569                         netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1570                 }
1571         }
1572 
1573         if (intr_status & EESR_RABT) {
1574                 /* Receive Abort int */
1575                 if (intr_status & EESR_RFRMER) {
1576                         /* Receive Frame Overflow int */
1577                         ndev->stats.rx_frame_errors++;
1578                         netif_err(mdp, rx_err, ndev, "Receive Abort\n");
1579                 }
1580         }
1581 
1582         if (intr_status & EESR_TDE) {
1583                 /* Transmit Descriptor Empty int */
1584                 ndev->stats.tx_fifo_errors++;
1585                 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1586         }
1587 
1588         if (intr_status & EESR_TFE) {
1589                 /* FIFO under flow */
1590                 ndev->stats.tx_fifo_errors++;
1591                 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1592         }
1593 
1594         if (intr_status & EESR_RDE) {
1595                 /* Receive Descriptor Empty int */
1596                 ndev->stats.rx_over_errors++;
1597                 netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
1598         }
1599 
1600         if (intr_status & EESR_RFE) {
1601                 /* Receive FIFO Overflow int */
1602                 ndev->stats.rx_fifo_errors++;
1603                 netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
1604         }
1605 
1606         if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1607                 /* Address Error */
1608                 ndev->stats.tx_fifo_errors++;
1609                 netif_err(mdp, tx_err, ndev, "Address Error\n");
1610         }
1611 
1612         mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1613         if (mdp->cd->no_ade)
1614                 mask &= ~EESR_ADE;
1615         if (intr_status & mask) {
1616                 /* Tx error */
1617                 u32 edtrr = sh_eth_read(ndev, EDTRR);
1618 
1619                 /* dmesg */
1620                 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1621                            intr_status, mdp->cur_tx, mdp->dirty_tx,
1622                            (u32)ndev->state, edtrr);
1623                 /* dirty buffer free */
1624                 sh_eth_txfree(ndev);
1625 
1626                 /* SH7712 BUG */
1627                 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1628                         /* tx dma start */
1629                         sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1630                 }
1631                 /* wakeup */
1632                 netif_wake_queue(ndev);
1633         }
1634 }
1635 
1636 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1637 {
1638         struct net_device *ndev = netdev;
1639         struct sh_eth_private *mdp = netdev_priv(ndev);
1640         struct sh_eth_cpu_data *cd = mdp->cd;
1641         irqreturn_t ret = IRQ_NONE;
1642         unsigned long intr_status, intr_enable;
1643 
1644         spin_lock(&mdp->lock);
1645 
1646         /* Get interrupt status */
1647         intr_status = sh_eth_read(ndev, EESR);
1648         /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1649          * enabled since it's the one that comes through regardless of the mask,
1650          * and we need to fully handle it in sh_eth_error() in order to quench
1651          * it as it doesn't get cleared by just writing 1 to the ECI bit...
1652          */
1653         intr_enable = sh_eth_read(ndev, EESIPR);
1654         intr_status &= intr_enable | DMAC_M_ECI;
1655         if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1656                 ret = IRQ_HANDLED;
1657         else
1658                 goto other_irq;
1659 
1660         if (intr_status & EESR_RX_CHECK) {
1661                 if (napi_schedule_prep(&mdp->napi)) {
1662                         /* Mask Rx interrupts */
1663                         sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1664                                      EESIPR);
1665                         __napi_schedule(&mdp->napi);
1666                 } else {
1667                         netdev_warn(ndev,
1668                                     "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1669                                     intr_status, intr_enable);
1670                 }
1671         }
1672 
1673         /* Tx Check */
1674         if (intr_status & cd->tx_check) {
1675                 /* Clear Tx interrupts */
1676                 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1677 
1678                 sh_eth_txfree(ndev);
1679                 netif_wake_queue(ndev);
1680         }
1681 
1682         if (intr_status & cd->eesr_err_check) {
1683                 /* Clear error interrupts */
1684                 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1685 
1686                 sh_eth_error(ndev, intr_status);
1687         }
1688 
1689 other_irq:
1690         spin_unlock(&mdp->lock);
1691 
1692         return ret;
1693 }
1694 
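/* NAPI poll handler.  The return value, budget - quota, is the number of
 * Rx packets handled; when it is below the budget, napi_complete() has run
 * and the EESIPR interrupt mask has been restored, otherwise the NAPI core
 * schedules another poll.
 */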
1695 static int sh_eth_poll(struct napi_struct *napi, int budget)
1696 {
1697         struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1698                                                   napi);
1699         struct net_device *ndev = napi->dev;
1700         int quota = budget;
1701         unsigned long intr_status;
1702 
1703         for (;;) {
1704                 intr_status = sh_eth_read(ndev, EESR);
1705                 if (!(intr_status & EESR_RX_CHECK))
1706                         break;
1707                 /* Clear Rx interrupts */
1708                 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1709 
1710                 if (sh_eth_rx(ndev, intr_status, &quota))
1711                         goto out;
1712         }
1713 
1714         napi_complete(napi);
1715 
1716         /* Reenable Rx interrupts */
1717         sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1718 out:
1719         return budget - quota;
1720 }
1721 
1722 /* PHY state control function */
1723 static void sh_eth_adjust_link(struct net_device *ndev)
1724 {
1725         struct sh_eth_private *mdp = netdev_priv(ndev);
1726         struct phy_device *phydev = mdp->phydev;
1727         int new_state = 0;
1728 
1729         if (phydev->link) {
1730                 if (phydev->duplex != mdp->duplex) {
1731                         new_state = 1;
1732                         mdp->duplex = phydev->duplex;
1733                         if (mdp->cd->set_duplex)
1734                                 mdp->cd->set_duplex(ndev);
1735                 }
1736 
1737                 if (phydev->speed != mdp->speed) {
1738                         new_state = 1;
1739                         mdp->speed = phydev->speed;
1740                         if (mdp->cd->set_rate)
1741                                 mdp->cd->set_rate(ndev);
1742                 }
1743                 if (!mdp->link) {
1744                         sh_eth_write(ndev,
1745                                      sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1746                                      ECMR);
1747                         new_state = 1;
1748                         mdp->link = phydev->link;
1749                         if (mdp->cd->no_psr || mdp->no_ether_link)
1750                                 sh_eth_rcv_snd_enable(ndev);
1751                 }
1752         } else if (mdp->link) {
1753                 new_state = 1;
1754                 mdp->link = 0;
1755                 mdp->speed = 0;
1756                 mdp->duplex = -1;
1757                 if (mdp->cd->no_psr || mdp->no_ether_link)
1758                         sh_eth_rcv_snd_disable(ndev);
1759         }
1760 
1761         if (new_state && netif_msg_link(mdp))
1762                 phy_print_status(phydev);
1763 }
1764 
1765 /* PHY init function */
1766 static int sh_eth_phy_init(struct net_device *ndev)
1767 {
1768         struct device_node *np = ndev->dev.parent->of_node;
1769         struct sh_eth_private *mdp = netdev_priv(ndev);
1770         struct phy_device *phydev = NULL;
1771 
1772         mdp->link = 0;
1773         mdp->speed = 0;
1774         mdp->duplex = -1;
1775 
1776         /* Try connect to PHY */
1777         if (np) {
1778                 struct device_node *pn;
1779 
1780                 pn = of_parse_phandle(np, "phy-handle", 0);
1781                 phydev = of_phy_connect(ndev, pn,
1782                                         sh_eth_adjust_link, 0,
1783                                         mdp->phy_interface);
1784 
1785                 if (!phydev)
1786                         phydev = ERR_PTR(-ENOENT);
1787         } else {
1788                 char phy_id[MII_BUS_ID_SIZE + 3];
1789 
1790                 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1791                          mdp->mii_bus->id, mdp->phy_id);
1792 
1793                 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1794                                      mdp->phy_interface);
1795         }
1796 
1797         if (IS_ERR(phydev)) {
1798                 netdev_err(ndev, "failed to connect PHY\n");
1799                 return PTR_ERR(phydev);
1800         }
1801 
1802         netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1803                     phydev->addr, phydev->irq, phydev->drv->name);
1804 
1805         mdp->phydev = phydev;
1806 
1807         return 0;
1808 }
1809 
1810 /* PHY control start function */
1811 static int sh_eth_phy_start(struct net_device *ndev)
1812 {
1813         struct sh_eth_private *mdp = netdev_priv(ndev);
1814         int ret;
1815 
1816         ret = sh_eth_phy_init(ndev);
1817         if (ret)
1818                 return ret;
1819 
1820         phy_start(mdp->phydev);
1821 
1822         return 0;
1823 }
1824 
1825 static int sh_eth_get_settings(struct net_device *ndev,
1826                                struct ethtool_cmd *ecmd)
1827 {
1828         struct sh_eth_private *mdp = netdev_priv(ndev);
1829         unsigned long flags;
1830         int ret;
1831 
1832         spin_lock_irqsave(&mdp->lock, flags);
1833         ret = phy_ethtool_gset(mdp->phydev, ecmd);
1834         spin_unlock_irqrestore(&mdp->lock, flags);
1835 
1836         return ret;
1837 }
1838 
1839 static int sh_eth_set_settings(struct net_device *ndev,
1840                                struct ethtool_cmd *ecmd)
1841 {
1842         struct sh_eth_private *mdp = netdev_priv(ndev);
1843         unsigned long flags;
1844         int ret;
1845 
1846         spin_lock_irqsave(&mdp->lock, flags);
1847 
1848         /* disable tx and rx */
1849         sh_eth_rcv_snd_disable(ndev);
1850 
1851         ret = phy_ethtool_sset(mdp->phydev, ecmd);
1852         if (ret)
1853                 goto error_exit;
1854 
1855         if (ecmd->duplex == DUPLEX_FULL)
1856                 mdp->duplex = 1;
1857         else
1858                 mdp->duplex = 0;
1859 
1860         if (mdp->cd->set_duplex)
1861                 mdp->cd->set_duplex(ndev);
1862 
1863 error_exit:
1864         mdelay(1);
1865 
1866         /* enable tx and rx */
1867         sh_eth_rcv_snd_enable(ndev);
1868 
1869         spin_unlock_irqrestore(&mdp->lock, flags);
1870 
1871         return ret;
1872 }
1873 
1874 static int sh_eth_nway_reset(struct net_device *ndev)
1875 {
1876         struct sh_eth_private *mdp = netdev_priv(ndev);
1877         unsigned long flags;
1878         int ret;
1879 
1880         spin_lock_irqsave(&mdp->lock, flags);
1881         ret = phy_start_aneg(mdp->phydev);
1882         spin_unlock_irqrestore(&mdp->lock, flags);
1883 
1884         return ret;
1885 }
1886 
1887 static u32 sh_eth_get_msglevel(struct net_device *ndev)
1888 {
1889         struct sh_eth_private *mdp = netdev_priv(ndev);
1890         return mdp->msg_enable;
1891 }
1892 
1893 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1894 {
1895         struct sh_eth_private *mdp = netdev_priv(ndev);
1896         mdp->msg_enable = value;
1897 }
1898 
1899 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1900         "rx_current", "tx_current",
1901         "rx_dirty", "tx_dirty",
1902 };
1903 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1904 
1905 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1906 {
1907         switch (sset) {
1908         case ETH_SS_STATS:
1909                 return SH_ETH_STATS_LEN;
1910         default:
1911                 return -EOPNOTSUPP;
1912         }
1913 }
1914 
1915 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1916                                      struct ethtool_stats *stats, u64 *data)
1917 {
1918         struct sh_eth_private *mdp = netdev_priv(ndev);
1919         int i = 0;
1920 
1921         /* device-specific stats */
1922         data[i++] = mdp->cur_rx;
1923         data[i++] = mdp->cur_tx;
1924         data[i++] = mdp->dirty_rx;
1925         data[i++] = mdp->dirty_tx;
1926 }
1927 
1928 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1929 {
1930         switch (stringset) {
1931         case ETH_SS_STATS:
1932                 memcpy(data, *sh_eth_gstrings_stats,
1933                        sizeof(sh_eth_gstrings_stats));
1934                 break;
1935         }
1936 }
1937 
1938 static void sh_eth_get_ringparam(struct net_device *ndev,
1939                                  struct ethtool_ringparam *ring)
1940 {
1941         struct sh_eth_private *mdp = netdev_priv(ndev);
1942 
1943         ring->rx_max_pending = RX_RING_MAX;
1944         ring->tx_max_pending = TX_RING_MAX;
1945         ring->rx_pending = mdp->num_rx_ring;
1946         ring->tx_pending = mdp->num_tx_ring;
1947 }
1948 
1949 static int sh_eth_set_ringparam(struct net_device *ndev,
1950                                 struct ethtool_ringparam *ring)
1951 {
1952         struct sh_eth_private *mdp = netdev_priv(ndev);
1953         int ret;
1954 
1955         if (ring->tx_pending > TX_RING_MAX ||
1956             ring->rx_pending > RX_RING_MAX ||
1957             ring->tx_pending < TX_RING_MIN ||
1958             ring->rx_pending < RX_RING_MIN)
1959                 return -EINVAL;
1960         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1961                 return -EINVAL;
1962 
1963         if (netif_running(ndev)) {
1964                 netif_tx_disable(ndev);
1965                 /* Disable interrupts by clearing the interrupt mask. */
1966                 sh_eth_write(ndev, 0x0000, EESIPR);
1967                 /* Stop the chip's Tx and Rx processes. */
1968                 sh_eth_write(ndev, 0, EDTRR);
1969                 sh_eth_write(ndev, 0, EDRRR);
1970                 synchronize_irq(ndev->irq);
1971         }
1972 
1973         /* Free all the skbuffs in the Rx queue. */
1974         sh_eth_ring_free(ndev);
1975         /* Free DMA buffer */
1976         sh_eth_free_dma_buffer(mdp);
1977 
1978         /* Set new parameters */
1979         mdp->num_rx_ring = ring->rx_pending;
1980         mdp->num_tx_ring = ring->tx_pending;
1981 
1982         ret = sh_eth_ring_init(ndev);
1983         if (ret < 0) {
1984                 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
1985                 return ret;
1986         }
1987         ret = sh_eth_dev_init(ndev, false);
1988         if (ret < 0) {
1989                 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
1990                 return ret;
1991         }
1992 
1993         if (netif_running(ndev)) {
1994                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1995                 /* Setting the Rx mode will start the Rx process. */
1996                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1997                 netif_wake_queue(ndev);
1998         }
1999 
2000         return 0;
2001 }
2002 
2003 static const struct ethtool_ops sh_eth_ethtool_ops = {
2004         .get_settings   = sh_eth_get_settings,
2005         .set_settings   = sh_eth_set_settings,
2006         .nway_reset     = sh_eth_nway_reset,
2007         .get_msglevel   = sh_eth_get_msglevel,
2008         .set_msglevel   = sh_eth_set_msglevel,
2009         .get_link       = ethtool_op_get_link,
2010         .get_strings    = sh_eth_get_strings,
2011         .get_ethtool_stats  = sh_eth_get_ethtool_stats,
2012         .get_sset_count     = sh_eth_get_sset_count,
2013         .get_ringparam  = sh_eth_get_ringparam,
2014         .set_ringparam  = sh_eth_set_ringparam,
2015 };
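/* These ops back the standard ethtool interface, so, for example,
 * "ethtool -G <iface> rx 128 tx 128" lands in sh_eth_set_ringparam() and
 * "ethtool -S <iface>" reports the four ring-pointer counters above.
 */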
2016 
2017 /* network device open function */
2018 static int sh_eth_open(struct net_device *ndev)
2019 {
2020         int ret = 0;
2021         struct sh_eth_private *mdp = netdev_priv(ndev);
2022 
2023         pm_runtime_get_sync(&mdp->pdev->dev);
2024 
2025         napi_enable(&mdp->napi);
2026 
2027         ret = request_irq(ndev->irq, sh_eth_interrupt,
2028                           mdp->cd->irq_flags, ndev->name, ndev);
2029         if (ret) {
2030                 netdev_err(ndev, "Can not assign IRQ number\n");
2031                 goto out_napi_off;
2032         }
2033 
2034         /* Descriptor set */
2035         ret = sh_eth_ring_init(ndev);
2036         if (ret)
2037                 goto out_free_irq;
2038 
2039         /* device init */
2040         ret = sh_eth_dev_init(ndev, true);
2041         if (ret)
2042                 goto out_free_irq;
2043 
2044         /* PHY control start */
2045         ret = sh_eth_phy_start(ndev);
2046         if (ret)
2047                 goto out_free_irq;
2048 
2049         return ret;
2050 
2051 out_free_irq:
2052         free_irq(ndev->irq, ndev);
2053 out_napi_off:
2054         napi_disable(&mdp->napi);
2055         pm_runtime_put_sync(&mdp->pdev->dev);
2056         return ret;
2057 }
2058 
2059 /* Timeout function */
2060 static void sh_eth_tx_timeout(struct net_device *ndev)
2061 {
2062         struct sh_eth_private *mdp = netdev_priv(ndev);
2063         struct sh_eth_rxdesc *rxdesc;
2064         int i;
2065 
2066         netif_stop_queue(ndev);
2067 
2068         netif_err(mdp, timer, ndev,
2069                   "transmit timed out, status %8.8x, resetting...\n",
2070                   (int)sh_eth_read(ndev, EESR));
2071 
2072         /* tx_errors count up */
2073         ndev->stats.tx_errors++;
2074 
2075         /* Free all the skbuffs in the Rx queue. */
2076         for (i = 0; i < mdp->num_rx_ring; i++) {
2077                 rxdesc = &mdp->rx_ring[i];
2078                 rxdesc->status = 0;
2079                 rxdesc->addr = 0xBADF00D0;
2080                 if (mdp->rx_skbuff[i])
2081                         dev_kfree_skb(mdp->rx_skbuff[i]);
2082                 mdp->rx_skbuff[i] = NULL;
2083         }
2084         for (i = 0; i < mdp->num_tx_ring; i++) {
2085                 if (mdp->tx_skbuff[i])
2086                         dev_kfree_skb(mdp->tx_skbuff[i]);
2087                 mdp->tx_skbuff[i] = NULL;
2088         }
2089 
2090         /* device init */
2091         sh_eth_dev_init(ndev, true);
2092 }
2093 
2094 /* Packet transmit function */
2095 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2096 {
2097         struct sh_eth_private *mdp = netdev_priv(ndev);
2098         struct sh_eth_txdesc *txdesc;
2099         u32 entry;
2100         unsigned long flags;
2101 
2102         spin_lock_irqsave(&mdp->lock, flags);
2103         if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2104                 if (!sh_eth_txfree(ndev)) {
2105                         netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2106                         netif_stop_queue(ndev);
2107                         spin_unlock_irqrestore(&mdp->lock, flags);
2108                         return NETDEV_TX_BUSY;
2109                 }
2110         }
2111         spin_unlock_irqrestore(&mdp->lock, flags);
2112 
2113         entry = mdp->cur_tx % mdp->num_tx_ring;
2114         mdp->tx_skbuff[entry] = skb;
2115         txdesc = &mdp->tx_ring[entry];
2116         /* soft swap. */
2117         if (!mdp->cd->hw_swap)
2118                 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2119                                  skb->len + 2);
2120         txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2121                                       DMA_TO_DEVICE);
2122         if (skb->len < ETH_ZLEN)
2123                 txdesc->buffer_length = ETH_ZLEN;
2124         else
2125                 txdesc->buffer_length = skb->len;
2126 
2127         if (entry >= mdp->num_tx_ring - 1)
2128                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2129         else
2130                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2131 
2132         mdp->cur_tx++;
2133 
2134         if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2135                 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2136 
2137         return NETDEV_TX_OK;
2138 }
2139 
2140 /* device close function */
2141 static int sh_eth_close(struct net_device *ndev)
2142 {
2143         struct sh_eth_private *mdp = netdev_priv(ndev);
2144 
2145         netif_stop_queue(ndev);
2146 
2147         /* Disable interrupts by clearing the interrupt mask. */
2148         sh_eth_write(ndev, 0x0000, EESIPR);
2149 
2150         /* Stop the chip's Tx and Rx processes. */
2151         sh_eth_write(ndev, 0, EDTRR);
2152         sh_eth_write(ndev, 0, EDRRR);
2153 
2154         /* PHY Disconnect */
2155         if (mdp->phydev) {
2156                 phy_stop(mdp->phydev);
2157                 phy_disconnect(mdp->phydev);
2158         }
2159 
2160         free_irq(ndev->irq, ndev);
2161 
2162         napi_disable(&mdp->napi);
2163 
2164         /* Free all the skbuffs in the Rx queue. */
2165         sh_eth_ring_free(ndev);
2166 
2167         /* free DMA buffer */
2168         sh_eth_free_dma_buffer(mdp);
2169 
2170         pm_runtime_put_sync(&mdp->pdev->dev);
2171 
2172         return 0;
2173 }
2174 
2175 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2176 {
2177         struct sh_eth_private *mdp = netdev_priv(ndev);
2178 
2179         if (sh_eth_is_rz_fast_ether(mdp))
2180                 return &ndev->stats;
2181 
2182         pm_runtime_get_sync(&mdp->pdev->dev);
2183 
2184         ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2185         sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
2186         ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2187         sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
2188         ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2189         sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
2190         if (sh_eth_is_gether(mdp)) {
2191                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2192                 sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
2193                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2194                 sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
2195         } else {
2196                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2197                 sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
2198         }
2199         pm_runtime_put_sync(&mdp->pdev->dev);
2200 
2201         return &ndev->stats;
2202 }
2203 
2204 /* device ioctl function */
2205 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2206 {
2207         struct sh_eth_private *mdp = netdev_priv(ndev);
2208         struct phy_device *phydev = mdp->phydev;
2209 
2210         if (!netif_running(ndev))
2211                 return -EINVAL;
2212 
2213         if (!phydev)
2214                 return -ENODEV;
2215 
2216         return phy_mii_ioctl(phydev, rq, cmd);
2217 }
2218 
2219 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2220 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2221                                             int entry)
2222 {
2223         return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2224 }
2225 
2226 static u32 sh_eth_tsu_get_post_mask(int entry)
2227 {
2228         return 0x0f << (28 - ((entry % 8) * 4));
2229 }
2230 
2231 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2232 {
2233         return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2234 }
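/* Worked example for the three helpers above: for CAM entry 10 the POST
 * register is TSU_POST1 + 4 (the register covering entries 8-15, see the
 * comments at the end of sh_eth_tsu_init()), the entry occupies the nibble
 * at bits 23:20, so the mask is 0x0f << 20 and the per-port bit is
 * 0x08 << 20 for port 0 or 0x02 << 20 for port 1.
 */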
2235 
2236 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2237                                              int entry)
2238 {
2239         struct sh_eth_private *mdp = netdev_priv(ndev);
2240         u32 tmp;
2241         void *reg_offset;
2242 
2243         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2244         tmp = ioread32(reg_offset);
2245         iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2246 }
2247 
2248 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2249                                               int entry)
2250 {
2251         struct sh_eth_private *mdp = netdev_priv(ndev);
2252         u32 post_mask, ref_mask, tmp;
2253         void *reg_offset;
2254 
2255         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2256         post_mask = sh_eth_tsu_get_post_mask(entry);
2257         ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2258 
2259         tmp = ioread32(reg_offset);
2260         iowrite32(tmp & ~post_mask, reg_offset);
2261 
2262         /* Return true if the other port still has the entry enabled */
2263         return tmp & ref_mask;
2264 }
2265 
2266 static int sh_eth_tsu_busy(struct net_device *ndev)
2267 {
2268         int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2269         struct sh_eth_private *mdp = netdev_priv(ndev);
2270 
2271         while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2272                 udelay(10);
2273                 timeout--;
2274                 if (timeout <= 0) {
2275                         netdev_err(ndev, "%s: timeout\n", __func__);
2276                         return -ETIMEDOUT;
2277                 }
2278         }
2279 
2280         return 0;
2281 }
2282 
2283 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2284                                   const u8 *addr)
2285 {
2286         u32 val;
2287 
2288         val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2289         iowrite32(val, reg);
2290         if (sh_eth_tsu_busy(ndev) < 0)
2291                 return -EBUSY;
2292 
2293         val = addr[4] << 8 | addr[5];
2294         iowrite32(val, reg + 4);
2295         if (sh_eth_tsu_busy(ndev) < 0)
2296                 return -EBUSY;
2297 
2298         return 0;
2299 }
2300 
2301 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2302 {
2303         u32 val;
2304 
2305         val = ioread32(reg);
2306         addr[0] = (val >> 24) & 0xff;
2307         addr[1] = (val >> 16) & 0xff;
2308         addr[2] = (val >> 8) & 0xff;
2309         addr[3] = val & 0xff;
2310         val = ioread32(reg + 4);
2311         addr[4] = (val >> 8) & 0xff;
2312         addr[5] = val & 0xff;
2313 }
2314 
2315 
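/* Each CAM entry occupies 8 bytes starting at TSU_ADRH0: the first word
 * holds the upper four bytes of the MAC address and the next word holds
 * the remaining two, which is why the helpers below step reg_offset by 8.
 */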
2316 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2317 {
2318         struct sh_eth_private *mdp = netdev_priv(ndev);
2319         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2320         int i;
2321         u8 c_addr[ETH_ALEN];
2322 
2323         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2324                 sh_eth_tsu_read_entry(reg_offset, c_addr);
2325                 if (ether_addr_equal(addr, c_addr))
2326                         return i;
2327         }
2328 
2329         return -ENOENT;
2330 }
2331 
2332 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2333 {
2334         u8 blank[ETH_ALEN];
2335         int entry;
2336 
2337         memset(blank, 0, sizeof(blank));
2338         entry = sh_eth_tsu_find_entry(ndev, blank);
2339         return (entry < 0) ? -ENOMEM : entry;
2340 }
2341 
2342 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2343                                               int entry)
2344 {
2345         struct sh_eth_private *mdp = netdev_priv(ndev);
2346         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2347         int ret;
2348         u8 blank[ETH_ALEN];
2349 
2350         sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2351                          ~(1 << (31 - entry)), TSU_TEN);
2352 
2353         memset(blank, 0, sizeof(blank));
2354         ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2355         if (ret < 0)
2356                 return ret;
2357         return 0;
2358 }
2359 
2360 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2361 {
2362         struct sh_eth_private *mdp = netdev_priv(ndev);
2363         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2364         int i, ret;
2365 
2366         if (!mdp->cd->tsu)
2367                 return 0;
2368 
2369         i = sh_eth_tsu_find_entry(ndev, addr);
2370         if (i < 0) {
2371                 /* No entry found, create one */
2372                 i = sh_eth_tsu_find_empty(ndev);
2373                 if (i < 0)
2374                         return -ENOMEM;
2375                 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2376                 if (ret < 0)
2377                         return ret;
2378 
2379                 /* Enable the entry */
2380                 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2381                                  (1 << (31 - i)), TSU_TEN);
2382         }
2383 
2384         /* Entry found or created, enable POST */
2385         sh_eth_tsu_enable_cam_entry_post(ndev, i);
2386 
2387         return 0;
2388 }
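/* Note that CAM entry i is switched on and off via bit (31 - i) of TSU_TEN,
 * i.e. entry 0 maps to the most significant bit, as in the TSU_TEN writes
 * in sh_eth_tsu_add_entry() and sh_eth_tsu_disable_cam_entry_table().
 */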
2389 
2390 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2391 {
2392         struct sh_eth_private *mdp = netdev_priv(ndev);
2393         int i, ret;
2394 
2395         if (!mdp->cd->tsu)
2396                 return 0;
2397 
2398         i = sh_eth_tsu_find_entry(ndev, addr);
2399         if (i >= 0) {
2400                 /* Entry found */
2401                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2402                         goto done;
2403 
2404                 /* Disable the table entry if both ports have disabled it */
2405                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2406                 if (ret < 0)
2407                         return ret;
2408         }
2409 done:
2410         return 0;
2411 }
2412 
2413 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2414 {
2415         struct sh_eth_private *mdp = netdev_priv(ndev);
2416         int i, ret;
2417 
2418         if (unlikely(!mdp->cd->tsu))
2419                 return 0;
2420 
2421         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2422                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2423                         continue;
2424 
2425                 /* Disable the table entry if both ports have disabled it */
2426                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2427                 if (ret < 0)
2428                         return ret;
2429         }
2430 
2431         return 0;
2432 }
2433 
2434 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2435 {
2436         struct sh_eth_private *mdp = netdev_priv(ndev);
2437         u8 addr[ETH_ALEN];
2438         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2439         int i;
2440 
2441         if (unlikely(!mdp->cd->tsu))
2442                 return;
2443 
2444         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2445                 sh_eth_tsu_read_entry(reg_offset, addr);
2446                 if (is_multicast_ether_addr(addr))
2447                         sh_eth_tsu_del_entry(ndev, addr);
2448         }
2449 }
2450 
2451 /* Set the multicast reception mode */
2452 static void sh_eth_set_multicast_list(struct net_device *ndev)
2453 {
2454         struct sh_eth_private *mdp = netdev_priv(ndev);
2455         u32 ecmr_bits;
2456         int mcast_all = 0;
2457         unsigned long flags;
2458 
2459         spin_lock_irqsave(&mdp->lock, flags);
2460         /* Initial condition is MCT = 1, PRM = 0.
2461          * Depending on ndev->flags, set PRM or clear MCT
2462          */
2463         ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2464 
2465         if (!(ndev->flags & IFF_MULTICAST)) {
2466                 sh_eth_tsu_purge_mcast(ndev);
2467                 mcast_all = 1;
2468         }
2469         if (ndev->flags & IFF_ALLMULTI) {
2470                 sh_eth_tsu_purge_mcast(ndev);
2471                 ecmr_bits &= ~ECMR_MCT;
2472                 mcast_all = 1;
2473         }
2474 
2475         if (ndev->flags & IFF_PROMISC) {
2476                 sh_eth_tsu_purge_all(ndev);
2477                 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2478         } else if (mdp->cd->tsu) {
2479                 struct netdev_hw_addr *ha;
2480                 netdev_for_each_mc_addr(ha, ndev) {
2481                         if (mcast_all && is_multicast_ether_addr(ha->addr))
2482                                 continue;
2483 
2484                         if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2485                                 if (!mcast_all) {
2486                                         sh_eth_tsu_purge_mcast(ndev);
2487                                         ecmr_bits &= ~ECMR_MCT;
2488                                         mcast_all = 1;
2489                                 }
2490                         }
2491                 }
2492         } else {
2493                 /* Normal, unicast/broadcast-only mode. */
2494                 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2495         }
2496 
2497         /* update the ethernet mode */
2498         sh_eth_write(ndev, ecmr_bits, ECMR);
2499 
2500         spin_unlock_irqrestore(&mdp->lock, flags);
2501 }
2502 
2503 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2504 {
2505         if (!mdp->port)
2506                 return TSU_VTAG0;
2507         else
2508                 return TSU_VTAG1;
2509 }
2510 
2511 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2512                                   __be16 proto, u16 vid)
2513 {
2514         struct sh_eth_private *mdp = netdev_priv(ndev);
2515         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2516 
2517         if (unlikely(!mdp->cd->tsu))
2518                 return -EPERM;
2519 
2520         /* No filtering if vid = 0 */
2521         if (!vid)
2522                 return 0;
2523 
2524         mdp->vlan_num_ids++;
2525 
2526         /* The controller has one VLAN tag HW filter. So, if the filter is
2527          * already enabled, the driver disables it and stops filtering VLAN IDs.
2528          */
2529         if (mdp->vlan_num_ids > 1) {
2530                 /* disable VLAN filter */
2531                 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2532                 return 0;
2533         }
2534 
2535         sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2536                          vtag_reg_index);
2537 
2538         return 0;
2539 }
2540 
2541 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2542                                    __be16 proto, u16 vid)
2543 {
2544         struct sh_eth_private *mdp = netdev_priv(ndev);
2545         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2546 
2547         if (unlikely(!mdp->cd->tsu))
2548                 return -EPERM;
2549 
2550         /* No filtering if vid = 0 */
2551         if (!vid)
2552                 return 0;
2553 
2554         mdp->vlan_num_ids--;
2555         sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2556 
2557         return 0;
2558 }
2559 
2560 /* SuperH's TSU register init function */
2561 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2562 {
2563         if (sh_eth_is_rz_fast_ether(mdp)) {
2564                 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2565                 return;
2566         }
2567 
2568         sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
2569         sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
2570         sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
2571         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2572         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2573         sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2574         sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2575         sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2576         sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2577         sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2578         if (sh_eth_is_gether(mdp)) {
2579                 sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
2580                 sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
2581         } else {
2582                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
2583                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
2584         }
2585         sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
2586         sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
2587         sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
2588         sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
2589         sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
2590         sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
2591         sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
2592 }
2593 
2594 /* MDIO bus release function */
2595 static int sh_mdio_release(struct sh_eth_private *mdp)
2596 {
2597         /* unregister mdio bus */
2598         mdiobus_unregister(mdp->mii_bus);
2599 
2600         /* free bitbang info */
2601         free_mdio_bitbang(mdp->mii_bus);
2602 
2603         return 0;
2604 }
2605 
2606 /* MDIO bus init function */
2607 static int sh_mdio_init(struct sh_eth_private *mdp,
2608                         struct sh_eth_plat_data *pd)
2609 {
2610         int ret, i;
2611         struct bb_info *bitbang;
2612         struct platform_device *pdev = mdp->pdev;
2613         struct device *dev = &mdp->pdev->dev;
2614 
2615         /* create bit control struct for PHY */
2616         bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2617         if (!bitbang)
2618                 return -ENOMEM;
2619 
2620         /* bitbang init */
2621         bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2622         bitbang->set_gate = pd->set_mdio_gate;
2623         bitbang->mdi_msk = PIR_MDI;
2624         bitbang->mdo_msk = PIR_MDO;
2625         bitbang->mmd_msk = PIR_MMD;
2626         bitbang->mdc_msk = PIR_MDC;
2627         bitbang->ctrl.ops = &bb_ops;
2628 
2629         /* MII controller setting */
2630         mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2631         if (!mdp->mii_bus)
2632                 return -ENOMEM;
2633 
2634         /* Hook up MII support for ethtool */
2635         mdp->mii_bus->name = "sh_mii";
2636         mdp->mii_bus->parent = dev;
2637         snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2638                  pdev->name, pdev->id);
2639 
2640         /* PHY IRQ */
2641         mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2642                                                GFP_KERNEL);
2643         if (!mdp->mii_bus->irq) {
2644                 ret = -ENOMEM;
2645                 goto out_free_bus;
2646         }
2647 
2648         /* register MDIO bus */
2649         if (dev->of_node) {
2650                 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2651         } else {
2652                 for (i = 0; i < PHY_MAX_ADDR; i++)
2653                         mdp->mii_bus->irq[i] = PHY_POLL;
2654                 if (pd->phy_irq > 0)
2655                         mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2656 
2657                 ret = mdiobus_register(mdp->mii_bus);
2658         }
2659 
2660         if (ret)
2661                 goto out_free_bus;
2662 
2663         return 0;
2664 
2665 out_free_bus:
2666         free_mdio_bitbang(mdp->mii_bus);
2667         return ret;
2668 }
2669 
2670 static const u16 *sh_eth_get_register_offset(int register_type)
2671 {
2672         const u16 *reg_offset = NULL;
2673 
2674         switch (register_type) {
2675         case SH_ETH_REG_GIGABIT:
2676                 reg_offset = sh_eth_offset_gigabit;
2677                 break;
2678         case SH_ETH_REG_FAST_RZ:
2679                 reg_offset = sh_eth_offset_fast_rz;
2680                 break;
2681         case SH_ETH_REG_FAST_RCAR:
2682                 reg_offset = sh_eth_offset_fast_rcar;
2683                 break;
2684         case SH_ETH_REG_FAST_SH4:
2685                 reg_offset = sh_eth_offset_fast_sh4;
2686                 break;
2687         case SH_ETH_REG_FAST_SH3_SH2:
2688                 reg_offset = sh_eth_offset_fast_sh3_sh2;
2689                 break;
2690         default:
2691                 break;
2692         }
2693 
2694         return reg_offset;
2695 }
2696 
2697 static const struct net_device_ops sh_eth_netdev_ops = {
2698         .ndo_open               = sh_eth_open,
2699         .ndo_stop               = sh_eth_close,
2700         .ndo_start_xmit         = sh_eth_start_xmit,
2701         .ndo_get_stats          = sh_eth_get_stats,
2702         .ndo_tx_timeout         = sh_eth_tx_timeout,
2703         .ndo_do_ioctl           = sh_eth_do_ioctl,
2704         .ndo_validate_addr      = eth_validate_addr,
2705         .ndo_set_mac_address    = eth_mac_addr,
2706         .ndo_change_mtu         = eth_change_mtu,
2707 };
2708 
2709 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2710         .ndo_open               = sh_eth_open,
2711         .ndo_stop               = sh_eth_close,
2712         .ndo_start_xmit         = sh_eth_start_xmit,
2713         .ndo_get_stats          = sh_eth_get_stats,
2714         .ndo_set_rx_mode        = sh_eth_set_multicast_list,
2715         .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
2716         .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
2717         .ndo_tx_timeout         = sh_eth_tx_timeout,
2718         .ndo_do_ioctl           = sh_eth_do_ioctl,
2719         .ndo_validate_addr      = eth_validate_addr,
2720         .ndo_set_mac_address    = eth_mac_addr,
2721         .ndo_change_mtu         = eth_change_mtu,
2722 };
2723 
2724 #ifdef CONFIG_OF
2725 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2726 {
2727         struct device_node *np = dev->of_node;
2728         struct sh_eth_plat_data *pdata;
2729         const char *mac_addr;
2730 
2731         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2732         if (!pdata)
2733                 return NULL;
2734 
2735         pdata->phy_interface = of_get_phy_mode(np);
2736 
2737         mac_addr = of_get_mac_address(np);
2738         if (mac_addr)
2739                 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2740 
2741         pdata->no_ether_link =
2742                 of_property_read_bool(np, "renesas,no-ether-link");
2743         pdata->ether_link_active_low =
2744                 of_property_read_bool(np, "renesas,ether-link-active-low");
2745 
2746         return pdata;
2747 }
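/* A minimal sketch of a device tree node consumed by the parser above and
 * by sh_eth_phy_init() (the unit address and the phy label are placeholders):
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7790";
 *		phy-mode = "rmii";
 *		phy-handle = <&phy1>;
 *		renesas,ether-link-active-low;
 *	};
 *
 * A real node also needs "reg" and "interrupts" properties (and typically
 * an MDIO child node describing the PHY), which are omitted here.
 */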
2748 
2749 static const struct of_device_id sh_eth_match_table[] = {
2750         { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
2751         { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
2752         { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
2753         { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
2754         { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
2755         { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
2756         { }
2757 };
2758 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
2759 #else
2760 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2761 {
2762         return NULL;
2763 }
2764 #endif
2765 
2766 static int sh_eth_drv_probe(struct platform_device *pdev)
2767 {
2768         int ret, devno = 0;
2769         struct resource *res;
2770         struct net_device *ndev = NULL;
2771         struct sh_eth_private *mdp = NULL;
2772         struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
2773         const struct platform_device_id *id = platform_get_device_id(pdev);
2774 
2775         /* get base addr */
2776         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2777         if (unlikely(res == NULL)) {
2778                 dev_err(&pdev->dev, "invalid resource\n");
2779                 return -EINVAL;
2780         }
2781 
2782         ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2783         if (!ndev)
2784                 return -ENOMEM;
2785 
2786         pm_runtime_enable(&pdev->dev);
2787         pm_runtime_get_sync(&pdev->dev);
2788 
2789         /* The sh Ether-specific entries in the device structure. */
2790         ndev->base_addr = res->start;
2791         devno = pdev->id;
2792         if (devno < 0)
2793                 devno = 0;
2794 
2795         ndev->dma = -1;
2796         ret = platform_get_irq(pdev, 0);
2797         if (ret < 0) {
2798                 ret = -ENODEV;
2799                 goto out_release;
2800         }
2801         ndev->irq = ret;
2802 
2803         SET_NETDEV_DEV(ndev, &pdev->dev);
2804 
2805         mdp = netdev_priv(ndev);
2806         mdp->num_tx_ring = TX_RING_SIZE;
2807         mdp->num_rx_ring = RX_RING_SIZE;
2808         mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2809         if (IS_ERR(mdp->addr)) {
2810                 ret = PTR_ERR(mdp->addr);
2811                 goto out_release;
2812         }
2813 
2814         spin_lock_init(&mdp->lock);
2815         mdp->pdev = pdev;
2816 
2817         if (pdev->dev.of_node)
2818                 pd = sh_eth_parse_dt(&pdev->dev);
2819         if (!pd) {
2820                 dev_err(&pdev->dev, "no platform data\n");
2821                 ret = -EINVAL;
2822                 goto out_release;
2823         }
2824 
2825         /* get PHY ID */
2826         mdp->phy_id = pd->phy;
2827         mdp->phy_interface = pd->phy_interface;
2828         /* EDMAC endian */
2829         mdp->edmac_endian = pd->edmac_endian;
2830         mdp->no_ether_link = pd->no_ether_link;
2831         mdp->ether_link_active_low = pd->ether_link_active_low;
2832 
2833         /* set cpu data */
2834         if (id) {
2835                 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2836         } else  {
2837                 const struct of_device_id *match;
2838 
2839                 match = of_match_device(of_match_ptr(sh_eth_match_table),
2840                                         &pdev->dev);
2841                 mdp->cd = (struct sh_eth_cpu_data *)match->data;
2842         }
2843         mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2844         if (!mdp->reg_offset) {
2845                 dev_err(&pdev->dev, "Unknown register type (%d)\n",
2846                         mdp->cd->register_type);
2847                 ret = -EINVAL;
2848                 goto out_release;
2849         }
2850         sh_eth_set_default_cpu_data(mdp->cd);
2851 
2852         /* set function */
2853         if (mdp->cd->tsu)
2854                 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2855         else
2856                 ndev->netdev_ops = &sh_eth_netdev_ops;
2857         ndev->ethtool_ops = &sh_eth_ethtool_ops;
2858         ndev->watchdog_timeo = TX_TIMEOUT;
2859 
2860         /* debug message level */
2861         mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2862 
2863         /* read and set MAC address */
2864         read_mac_address(ndev, pd->mac_addr);
2865         if (!is_valid_ether_addr(ndev->dev_addr)) {
2866                 dev_warn(&pdev->dev,
2867                          "no valid MAC address supplied, using a random one.\n");
2868                 eth_hw_addr_random(ndev);
2869         }
2870 
2871         /* ioremap the TSU registers */
2872         if (mdp->cd->tsu) {
2873                 struct resource *rtsu;
2874                 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2875                 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2876                 if (IS_ERR(mdp->tsu_addr)) {
2877                         ret = PTR_ERR(mdp->tsu_addr);
2878                         goto out_release;
2879                 }
2880                 mdp->port = devno % 2;
2881                 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
2882         }
2883 
2884         /* initialize the first device, or any device that requires init */
2885         if (!devno || pd->needs_init) {
2886                 if (mdp->cd->chip_reset)
2887                         mdp->cd->chip_reset(ndev);
2888 
2889                 if (mdp->cd->tsu) {
2890                         /* TSU init (Init only) */
2891                         sh_eth_tsu_init(mdp);
2892                 }
2893         }
2894 
2895         /* MDIO bus init */
2896         ret = sh_mdio_init(mdp, pd);
2897         if (ret) {
2898                 dev_err(&ndev->dev, "failed to initialise MDIO\n");
2899                 goto out_release;
2900         }
2901 
2902         netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2903 
2904         /* network device register */
2905         ret = register_netdev(ndev);
2906         if (ret)
2907                 goto out_napi_del;
2908 
2909         /* print device information */
2910         netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
2911                     (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2912 
2913         pm_runtime_put(&pdev->dev);
2914         platform_set_drvdata(pdev, ndev);
2915 
2916         return ret;
2917 
2918 out_napi_del:
2919         netif_napi_del(&mdp->napi);
2920         sh_mdio_release(mdp);
2921 
2922 out_release:
2923         /* net_dev free */
2924         if (ndev)
2925                 free_netdev(ndev);
2926 
2927         pm_runtime_put(&pdev->dev);
2928         pm_runtime_disable(&pdev->dev);
2929         return ret;
2930 }
2931 
2932 static int sh_eth_drv_remove(struct platform_device *pdev)
2933 {
2934         struct net_device *ndev = platform_get_drvdata(pdev);
2935         struct sh_eth_private *mdp = netdev_priv(ndev);
2936 
2937         unregister_netdev(ndev);
2938         netif_napi_del(&mdp->napi);
2939         sh_mdio_release(mdp);
2940         pm_runtime_disable(&pdev->dev);
2941         free_netdev(ndev);
2942 
2943         return 0;
2944 }
2945 
2946 #ifdef CONFIG_PM
2947 static int sh_eth_runtime_nop(struct device *dev)
2948 {
2949         /* Runtime PM callback shared between ->runtime_suspend()
2950          * and ->runtime_resume(). Simply returns success.
2951          *
2952          * This driver re-initializes all registers after
2953          * pm_runtime_get_sync() anyway so there is no need
2954          * to save and restore registers here.
2955          */
2956         return 0;
2957 }
2958 
2959 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
2960         .runtime_suspend = sh_eth_runtime_nop,
2961         .runtime_resume = sh_eth_runtime_nop,
2962 };
2963 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
2964 #else
2965 #define SH_ETH_PM_OPS NULL
2966 #endif
2967 
2968 static struct platform_device_id sh_eth_id_table[] = {
2969         { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
2970         { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
2971         { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
2972         { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
2973         { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
2974         { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
2975         { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
2976         { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
2977         { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
2978         { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
2979         { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
2980         { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
2981         { }
2982 };
2983 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
2984 
2985 static struct platform_driver sh_eth_driver = {
2986         .probe = sh_eth_drv_probe,
2987         .remove = sh_eth_drv_remove,
2988         .id_table = sh_eth_id_table,
2989         .driver = {
2990                    .name = CARDNAME,
2991                    .pm = SH_ETH_PM_OPS,
2992                    .of_match_table = of_match_ptr(sh_eth_match_table),
2993         },
2994 };
2995 
2996 module_platform_driver(sh_eth_driver);
2997 
2998 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2999 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3000 MODULE_LICENSE("GPL v2");
3001 
